rev 2724 : 6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the amount of marked bytes and the cards spanned by marked objects in marking task/worker thread local data structures, which are updated as individual objects are marked.
Reviewed-by:
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27 27
28 28 #include "gc_implementation/g1/concurrentMark.hpp"
29 29 #include "gc_implementation/g1/g1AllocRegion.hpp"
30 30 #include "gc_implementation/g1/g1HRPrinter.hpp"
31 31 #include "gc_implementation/g1/g1RemSet.hpp"
32 32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
33 33 #include "gc_implementation/g1/heapRegionSeq.hpp"
34 34 #include "gc_implementation/g1/heapRegionSets.hpp"
35 35 #include "gc_implementation/shared/hSpaceCounters.hpp"
36 36 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
37 37 #include "memory/barrierSet.hpp"
38 38 #include "memory/memRegion.hpp"
39 39 #include "memory/sharedHeap.hpp"
40 40
41 41 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
42 42 // It uses the "Garbage First" heap organization and algorithm, which
43 43 // may combine concurrent marking with parallel, incremental compaction of
44 44 // heap subsets that will yield large amounts of garbage.
45 45
46 46 class HeapRegion;
47 47 class HRRSCleanupTask;
48 48 class PermanentGenerationSpec;
49 49 class GenerationSpec;
50 50 class OopsInHeapRegionClosure;
51 51 class G1ScanHeapEvacClosure;
52 52 class ObjectClosure;
53 53 class SpaceClosure;
54 54 class CompactibleSpaceClosure;
55 55 class Space;
56 56 class G1CollectorPolicy;
57 57 class GenRemSet;
58 58 class G1RemSet;
59 59 class HeapRegionRemSetIterator;
60 60 class ConcurrentMark;
61 61 class ConcurrentMarkThread;
62 62 class ConcurrentG1Refine;
63 63 class GenerationCounters;
64 64
65 65 typedef OverflowTaskQueue<StarTask> RefToScanQueue;
66 66 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
67 67
68 68 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
69 69 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
70 70
71 71 enum GCAllocPurpose {
72 72 GCAllocForTenured,
73 73 GCAllocForSurvived,
74 74 GCAllocPurposeCount
75 75 };
76 76
77 77 class YoungList : public CHeapObj {
78 78 private:
79 79 G1CollectedHeap* _g1h;
80 80
81 81 HeapRegion* _head;
82 82
83 83 HeapRegion* _survivor_head;
84 84 HeapRegion* _survivor_tail;
85 85
86 86 HeapRegion* _curr;
87 87
88 88 size_t _length;
89 89 size_t _survivor_length;
90 90
91 91 size_t _last_sampled_rs_lengths;
92 92 size_t _sampled_rs_lengths;
93 93
94 94 void empty_list(HeapRegion* list);
95 95
96 96 public:
97 97 YoungList(G1CollectedHeap* g1h);
98 98
99 99 void push_region(HeapRegion* hr);
100 100 void add_survivor_region(HeapRegion* hr);
101 101
102 102 void empty_list();
103 103 bool is_empty() { return _length == 0; }
104 104 size_t length() { return _length; }
105 105 size_t survivor_length() { return _survivor_length; }
106 106
107 107 // Currently we do not keep track of the used byte sum for the
108 108 // young list and the survivors and it'd be quite a lot of work to
109 109 // do so. When we eventually replace the young list with
110 110 // instances of HeapRegionLinkedList we'll get that for free. So,
111 111 // we'll report the more accurate information then.
112 112 size_t eden_used_bytes() {
113 113 assert(length() >= survivor_length(), "invariant");
114 114 return (length() - survivor_length()) * HeapRegion::GrainBytes;
115 115 }
116 116 size_t survivor_used_bytes() {
117 117 return survivor_length() * HeapRegion::GrainBytes;
118 118 }
119 119
120 120 void rs_length_sampling_init();
121 121 bool rs_length_sampling_more();
122 122 void rs_length_sampling_next();
123 123
124 124 void reset_sampled_info() {
125 125 _last_sampled_rs_lengths = 0;
126 126 }
127 127 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
128 128
129 129 // for development purposes
130 130 void reset_auxilary_lists();
131 131 void clear() { _head = NULL; _length = 0; }
132 132
133 133 void clear_survivors() {
134 134 _survivor_head = NULL;
135 135 _survivor_tail = NULL;
136 136 _survivor_length = 0;
137 137 }
138 138
139 139 HeapRegion* first_region() { return _head; }
140 140 HeapRegion* first_survivor_region() { return _survivor_head; }
141 141 HeapRegion* last_survivor_region() { return _survivor_tail; }
142 142
143 143 // debugging
144 144 bool check_list_well_formed();
145 145 bool check_list_empty(bool check_sample = true);
146 146 void print();
147 147 };
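The rs_length_sampling_* methods above form a small iteration protocol. A hypothetical caller that samples the remembered-set lengths of the young regions would, roughly, do the following (only the YoungList calls are real; the wrapper is made up for illustration):

    // Sketch: walk the young list and accumulate sampled RS lengths.
    static void sample_young_rs_lengths(YoungList* young_list) {
      young_list->rs_length_sampling_init();
      while (young_list->rs_length_sampling_more()) {
        // Each step samples the current region's remembered set size and
        // advances the internal cursor (_curr).
        young_list->rs_length_sampling_next();
      }
      // The accumulated value is later published via sampled_rs_lengths().
    }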
148 148
149 149 class MutatorAllocRegion : public G1AllocRegion {
150 150 protected:
151 151 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
152 152 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
153 153 public:
154 154 MutatorAllocRegion()
155 155 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
156 156 };
157 157
158 158 // The G1 STW is alive closure.
159 159 // An instance is embedded into the G1CH and used as the
160 160 // (optional) _is_alive_non_header closure in the STW
161 161 // reference processor. It is also extensively used during
162 162 // reference processing during STW evacuation pauses.
163 163 class G1STWIsAliveClosure: public BoolObjectClosure {
164 164 G1CollectedHeap* _g1;
165 165 public:
166 166 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
167 167 void do_object(oop p) { assert(false, "Do not call."); }
168 168 bool do_object_b(oop p);
169 169 };
170 170
171 171 class SurvivorGCAllocRegion : public G1AllocRegion {
172 172 protected:
173 173 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
174 174 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
175 175 public:
176 176 SurvivorGCAllocRegion()
177 177 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
178 178 };
179 179
180 180 class OldGCAllocRegion : public G1AllocRegion {
181 181 protected:
182 182 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
183 183 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
184 184 public:
185 185 OldGCAllocRegion()
186 186 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
187 187 };
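For context (illustrative, not part of this diff), each of these G1AllocRegion subclasses is expected to forward its two virtuals to the matching new_*/retire_* callbacks that G1CollectedHeap declares further down; the real definitions live in g1CollectedHeap.cpp. A sketch for the mutator case:

    HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                        bool force) {
      // Delegate to the heap, which takes a region off the free list
      // (possibly expanding the heap).
      return _g1h->new_mutator_alloc_region(word_size, force);
    }

    void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                           size_t allocated_bytes) {
      // Hand the filled-up region back so its used bytes are accounted for.
      _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
    }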
188 188
189 189 class RefineCardTableEntryClosure;
190 190
191 191 class G1CollectedHeap : public SharedHeap {
192 192 friend class VM_G1CollectForAllocation;
193 193 friend class VM_GenCollectForPermanentAllocation;
194 194 friend class VM_G1CollectFull;
195 195 friend class VM_G1IncCollectionPause;
196 196 friend class VMStructs;
197 197 friend class MutatorAllocRegion;
198 198 friend class SurvivorGCAllocRegion;
199 199 friend class OldGCAllocRegion;
200 200
201 201 // Closures used in implementation.
202 202 friend class G1ParCopyHelper;
203 203 friend class G1IsAliveClosure;
204 204 friend class G1EvacuateFollowersClosure;
205 205 friend class G1ParScanThreadState;
206 206 friend class G1ParScanClosureSuper;
207 207 friend class G1ParEvacuateFollowersClosure;
208 208 friend class G1ParTask;
209 209 friend class G1FreeGarbageRegionClosure;
210 210 friend class RefineCardTableEntryClosure;
211 211 friend class G1PrepareCompactClosure;
212 212 friend class RegionSorter;
213 213 friend class RegionResetter;
214 214 friend class CountRCClosure;
215 215 friend class EvacPopObjClosure;
216 216 friend class G1ParCleanupCTTask;
217 217
218 218 // Other related classes.
219 219 friend class G1MarkSweep;
220 220
221 221 private:
222 222 // The one and only G1CollectedHeap, so static functions can find it.
223 223 static G1CollectedHeap* _g1h;
224 224
225 225 static size_t _humongous_object_threshold_in_words;
226 226
227 227 // Storage for the G1 heap (excludes the permanent generation).
228 228 VirtualSpace _g1_storage;
229 229 MemRegion _g1_reserved;
230 230
231 231 // The part of _g1_storage that is currently committed.
232 232 MemRegion _g1_committed;
233 233
234 234 // The master free list. It will satisfy all new region allocations.
235 235 MasterFreeRegionList _free_list;
236 236
237 237 // The secondary free list which contains regions that have been
238 238 // freed up during the cleanup process. This will be appended to the
239 239 // master free list when appropriate.
240 240 SecondaryFreeRegionList _secondary_free_list;
241 241
242 242 // It keeps track of the humongous regions.
243 243 MasterHumongousRegionSet _humongous_set;
244 244
245 245 // The number of regions we could create by expansion.
246 246 size_t _expansion_regions;
247 247
248 248 // The block offset table for the G1 heap.
249 249 G1BlockOffsetSharedArray* _bot_shared;
250 250
251 251 // Move all of the regions off the free lists, then rebuild those free
252 252 // lists, before and after full GC.
253 253 void tear_down_region_lists();
254 254 void rebuild_region_lists();
255 255
256 256 // The sequence of all heap regions in the heap.
257 257 HeapRegionSeq _hrs;
258 258
259 259 // Alloc region used to satisfy mutator allocation requests.
260 260 MutatorAllocRegion _mutator_alloc_region;
261 261
262 262 // Alloc region used to satisfy allocation requests by the GC for
263 263 // survivor objects.
264 264 SurvivorGCAllocRegion _survivor_gc_alloc_region;
265 265
266 266 // Alloc region used to satisfy allocation requests by the GC for
267 267 // old objects.
268 268 OldGCAllocRegion _old_gc_alloc_region;
269 269
270 270 // The last old region we allocated to during the last GC.
271 271 // Typically, it is not full so we should re-use it during the next GC.
272 272 HeapRegion* _retained_old_gc_alloc_region;
273 273
274 274 // It resets the mutator alloc region before new allocations can take place.
275 275 void init_mutator_alloc_region();
276 276
277 277 // It releases the mutator alloc region.
278 278 void release_mutator_alloc_region();
279 279
280 280 // It initializes the GC alloc regions at the start of a GC.
281 281 void init_gc_alloc_regions();
282 282
283 283 // It releases the GC alloc regions at the end of a GC.
284 284 void release_gc_alloc_regions();
285 285
286 286 // It does any cleanup that needs to be done on the GC alloc regions
287 287 // before a Full GC.
288 288 void abandon_gc_alloc_regions();
289 289
290 290 // Helper for monitoring and management support.
291 291 G1MonitoringSupport* _g1mm;
292 292
293 293 // Determines PLAB size for a particular allocation purpose.
294 294 static size_t desired_plab_sz(GCAllocPurpose purpose);
295 295
296 296 // Outside of GC pauses, the number of bytes used in all regions other
297 297 // than the current allocation region.
298 298 size_t _summary_bytes_used;
299 299
300 300 // This is used for a quick test on whether a reference points into
301 301 // the collection set or not. Basically, we have an array, with one
302 302 // byte per region, and that byte denotes whether the corresponding
303 303 // region is in the collection set or not. The entry corresponding
304 304 // to the bottom of the heap, i.e., region 0, is pointed to by
305 305 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
306 306 // biased so that it actually points to address 0 of the address
307 307 // space, to make the test as fast as possible (we can simply shift
308 308 // the address to index into it, instead of having to subtract the
309 309 // bottom of the heap from the address before shifting it; basically
310 310 // it works in the same way the card table works).
311 311 bool* _in_cset_fast_test;
312 312
313 313 // The allocated array used for the fast test on whether a reference
314 314 // points into the collection set or not. This field is also used to
315 315 // free the array.
316 316 bool* _in_cset_fast_test_base;
317 317
318 318 // The length of the _in_cset_fast_test_base array.
319 319 size_t _in_cset_fast_test_length;
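The biasing described above mirrors the card table trick. A minimal sketch (hypothetical helper, not part of this change) of how the biased pointer relates to the base array:

    // Given the start of the per-region flag array and the heap bottom,
    // compute the pointer that lets (addr >> LogOfHRGrainBytes) be used as
    // a direct index, with no subtraction of the heap bottom on each test.
    static bool* bias_in_cset_fast_test(bool* base, HeapWord* heap_bottom) {
      return base - ((uintptr_t) heap_bottom >> HeapRegion::LogOfHRGrainBytes);
    }

With the biased pointer, in_cset_fast_test() below is a single shift and load.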
320 320
321 321 volatile unsigned _gc_time_stamp;
322 322
323 323 size_t* _surviving_young_words;
324 324
325 325 G1HRPrinter _hr_printer;
326 326
327 327 void setup_surviving_young_words();
328 328 void update_surviving_young_words(size_t* surv_young_words);
329 329 void cleanup_surviving_young_words();
330 330
331 331 // It decides whether an explicit GC should start a concurrent cycle
332 332 // instead of doing a STW GC. Currently, a concurrent cycle is
333 333 // explicitly started if:
334 334 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
335 335 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
336 336 bool should_do_concurrent_full_gc(GCCause::Cause cause);
337 337
338 338 // Keeps track of how many "full collections" (i.e., Full GCs or
339 339 // concurrent cycles) we have completed. The number of them we have
340 340 // started is maintained in _total_full_collections in CollectedHeap.
341 341 volatile unsigned int _full_collections_completed;
342 342
343 343 // This is a non-product method that is helpful for testing. It is
344 344 // called at the end of a GC and artificially expands the heap by
345 345 // allocating a number of dead regions. This way we can induce very
346 346 // frequent marking cycles and stress the cleanup / concurrent
347 347 // cleanup code more (as all the regions that will be allocated by
348 348 // this method will be found dead by the marking cycle).
349 349 void allocate_dummy_regions() PRODUCT_RETURN;
350 350
351 351 // These are macros so that, if the assert fires, we get the correct
352 352 // line number, file, etc.
353 353
354 354 #define heap_locking_asserts_err_msg(_extra_message_) \
355 355 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
356 356 (_extra_message_), \
357 357 BOOL_TO_STR(Heap_lock->owned_by_self()), \
358 358 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
359 359 BOOL_TO_STR(Thread::current()->is_VM_thread()))
360 360
361 361 #define assert_heap_locked() \
362 362 do { \
363 363 assert(Heap_lock->owned_by_self(), \
364 364 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
365 365 } while (0)
366 366
367 367 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
368 368 do { \
369 369 assert(Heap_lock->owned_by_self() || \
370 370 (SafepointSynchronize::is_at_safepoint() && \
371 371 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
372 372 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
373 373 "should be at a safepoint")); \
374 374 } while (0)
375 375
376 376 #define assert_heap_locked_and_not_at_safepoint() \
377 377 do { \
378 378 assert(Heap_lock->owned_by_self() && \
379 379 !SafepointSynchronize::is_at_safepoint(), \
380 380 heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
381 381 "should not be at a safepoint")); \
382 382 } while (0)
383 383
384 384 #define assert_heap_not_locked() \
385 385 do { \
386 386 assert(!Heap_lock->owned_by_self(), \
387 387 heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
388 388 } while (0)
389 389
390 390 #define assert_heap_not_locked_and_not_at_safepoint() \
391 391 do { \
392 392 assert(!Heap_lock->owned_by_self() && \
393 393 !SafepointSynchronize::is_at_safepoint(), \
394 394 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
395 395 "should not be at a safepoint")); \
396 396 } while (0)
397 397
398 398 #define assert_at_safepoint(_should_be_vm_thread_) \
399 399 do { \
400 400 assert(SafepointSynchronize::is_at_safepoint() && \
401 401 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
402 402 heap_locking_asserts_err_msg("should be at a safepoint")); \
403 403 } while (0)
404 404
405 405 #define assert_not_at_safepoint() \
406 406 do { \
407 407 assert(!SafepointSynchronize::is_at_safepoint(), \
408 408 heap_locking_asserts_err_msg("should not be at a safepoint")); \
409 409 } while (0)
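As a usage illustration (hypothetical function, not part of this change), a path that must be reached by a mutator thread outside a safepoint and without the Heap_lock held would start like this:

    static void mutator_entry_point_sketch() {
      // Document and check the locking protocol up front; the message from
      // heap_locking_asserts_err_msg() reports the actual lock/safepoint state.
      assert_heap_not_locked_and_not_at_safepoint();
      // ... fast-path allocation work would follow here ...
    }

A VM-operation-only path would instead begin with assert_at_safepoint(true /* should be VM thread */).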
410 410
411 411 protected:
412 412
413 413 // The young region list.
414 414 YoungList* _young_list;
415 415
416 416 // The current policy object for the collector.
417 417 G1CollectorPolicy* _g1_policy;
418 418
419 419 // This is the second level of trying to allocate a new region. If
420 420 // new_region() didn't find a region on the free_list, this call will
421 421 // check whether there's anything available on the
422 422 // secondary_free_list and/or wait for more regions to appear on
423 423 // that list, if _free_regions_coming is set.
424 424 HeapRegion* new_region_try_secondary_free_list();
425 425
426 426 // Try to allocate a single non-humongous HeapRegion sufficient for
427 427 // an allocation of the given word_size. If do_expand is true,
428 428 // attempt to expand the heap if necessary to satisfy the allocation
429 429 // request.
430 430 HeapRegion* new_region(size_t word_size, bool do_expand);
431 431
432 432 // Attempt to satisfy a humongous allocation request of the given
433 433 // size by finding a contiguous set of free regions of num_regions
434 434 // length and removing them from the master free list. Return the
435 435 // index of the first region or G1_NULL_HRS_INDEX if the search
436 436 // was unsuccessful.
437 437 size_t humongous_obj_allocate_find_first(size_t num_regions,
438 438 size_t word_size);
439 439
440 440 // Initialize a contiguous set of free regions of length num_regions
441 441 // and starting at index first so that they appear as a single
442 442 // humongous region.
443 443 HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
444 444 size_t num_regions,
445 445 size_t word_size);
446 446
447 447 // Attempt to allocate a humongous object of the given size. Return
448 448 // NULL if unsuccessful.
449 449 HeapWord* humongous_obj_allocate(size_t word_size);
450 450
451 451 // The following two methods, allocate_new_tlab() and
452 452 // mem_allocate(), are the two main entry points from the runtime
453 453 // into the G1's allocation routines. They have the following
454 454 // assumptions:
455 455 //
456 456 // * They should both be called outside safepoints.
457 457 //
458 458 // * They should both be called without holding the Heap_lock.
459 459 //
460 460 // * All allocation requests for new TLABs should go to
461 461 // allocate_new_tlab().
462 462 //
463 463 // * All non-TLAB allocation requests should go to mem_allocate().
464 464 //
465 465 // * If either call cannot satisfy the allocation request using the
466 466 // current allocating region, they will try to get a new one. If
467 467 // this fails, they will attempt to do an evacuation pause and
468 468 // retry the allocation.
469 469 //
470 470 // * If all allocation attempts fail, even after trying to schedule
471 471 // an evacuation pause, allocate_new_tlab() will return NULL,
472 472 // whereas mem_allocate() will attempt a heap expansion and/or
473 473 // schedule a Full GC.
474 474 //
475 475 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
476 476 // should never be called with word_size being humongous. All
477 477 // humongous allocation requests should go to mem_allocate() which
478 478 // will satisfy them with a special path.
479 479
480 480 virtual HeapWord* allocate_new_tlab(size_t word_size);
481 481
482 482 virtual HeapWord* mem_allocate(size_t word_size,
483 483 bool* gc_overhead_limit_was_exceeded);
484 484
485 485 // The following three methods take a gc_count_before_ret
486 486 // parameter which is used to return the GC count if the method
487 487 // returns NULL. Given that we are required to read the GC count
488 488 // while holding the Heap_lock, and these paths will take the
489 489 // Heap_lock at some point, it's easier to get them to read the GC
490 490 // count while holding the Heap_lock before they return NULL instead
491 491 // of the caller (namely: mem_allocate()) having to also take the
492 492 // Heap_lock just to read the GC count.
493 493
494 494 // First-level mutator allocation attempt: try to allocate out of
495 495 // the mutator alloc region without taking the Heap_lock. This
496 496 // should only be used for non-humongous allocations.
497 497 inline HeapWord* attempt_allocation(size_t word_size,
498 498 unsigned int* gc_count_before_ret);
499 499
500 500 // Second-level mutator allocation attempt: take the Heap_lock and
501 501 // retry the allocation attempt, potentially scheduling a GC
502 502 // pause. This should only be used for non-humongous allocations.
503 503 HeapWord* attempt_allocation_slow(size_t word_size,
504 504 unsigned int* gc_count_before_ret);
505 505
506 506 // Takes the Heap_lock and attempts a humongous allocation. It can
507 507 // potentially schedule a GC pause.
508 508 HeapWord* attempt_allocation_humongous(size_t word_size,
509 509 unsigned int* gc_count_before_ret);
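A rough sketch of how the first and second levels compose for a non-humongous request (hypothetical member, shown only to make the layering concrete; in the actual code the slow path is reached from within the fast path, and both live outside this header):

    HeapWord* G1CollectedHeap::allocation_flow_sketch(size_t word_size) {
      assert(!isHumongous(word_size), "humongous requests take another path");
      unsigned int gc_count_before = 0;
      // First level: lock-free allocation out of the current mutator
      // alloc region.
      HeapWord* result = attempt_allocation(word_size, &gc_count_before);
      if (result == NULL) {
        // Second level: take the Heap_lock, retry, and possibly schedule an
        // evacuation pause; gc_count_before is filled in if NULL is returned.
        result = attempt_allocation_slow(word_size, &gc_count_before);
      }
      return result;
    }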
510 510
511 511 // Allocation attempt that should be called during safepoints (e.g.,
512 512 // at the end of a successful GC). expect_null_mutator_alloc_region
513 513 // specifies whether the mutator alloc region is expected to be NULL
514 514 // or not.
515 515 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
516 516 bool expect_null_mutator_alloc_region);
517 517
518 518 // It dirties the cards that cover the block so that the post
519 519 // write barrier never queues anything when updating objects on this
520 520 // block. It is assumed (and in fact we assert) that the block
521 521 // belongs to a young region.
522 522 inline void dirty_young_block(HeapWord* start, size_t word_size);
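A sketch of what the dirtying amounts to (an assumption based on the comment above; the real inline definition lives elsewhere and also asserts the young-region pre-conditions):

    static void dirty_young_block_sketch(CardTableModRefBS* ct_bs,
                                         HeapWord* start, size_t word_size) {
      // Mark every card spanning [start, start + word_size) dirty up front,
      // so the post write barrier's "card already dirty" filter never
      // enqueues anything for stores into this freshly allocated block.
      MemRegion mr(start, word_size);
      ct_bs->dirty(mr);
    }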
523 523
524 524 // Allocate blocks during garbage collection. Will ensure an
525 525 // allocation region, either by picking one or expanding the
526 526 // heap, and then allocate a block of the given size. The block
527 527 // may not be humongous - it must fit into a single heap region.
528 528 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
529 529
530 530 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
531 531 HeapRegion* alloc_region,
532 532 bool par,
533 533 size_t word_size);
534 534
535 535 // Ensure that no further allocations can happen in "r", bearing in mind
536 536 // that parallel threads might be attempting allocations.
537 537 void par_allocate_remaining_space(HeapRegion* r);
538 538
539 539 // Allocation attempt during GC for a survivor object / PLAB.
540 540 inline HeapWord* survivor_attempt_allocation(size_t word_size);
541 541
542 542 // Allocation attempt during GC for an old object / PLAB.
543 543 inline HeapWord* old_attempt_allocation(size_t word_size);
544 544
545 545 // These methods are the "callbacks" from the G1AllocRegion class.
546 546
547 547 // For mutator alloc regions.
548 548 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
549 549 void retire_mutator_alloc_region(HeapRegion* alloc_region,
550 550 size_t allocated_bytes);
551 551
552 552 // For GC alloc regions.
553 553 HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
554 554 GCAllocPurpose ap);
555 555 void retire_gc_alloc_region(HeapRegion* alloc_region,
556 556 size_t allocated_bytes, GCAllocPurpose ap);
557 557
558 558 // - if explicit_gc is true, the GC is for a System.gc() or a heap
559 559 // inspection request and should collect the entire heap
560 560 // - if clear_all_soft_refs is true, all soft references should be
561 561 // cleared during the GC
562 562 // - if explicit_gc is false, word_size describes the allocation that
563 563 // the GC should attempt (at least) to satisfy
564 564 // - it returns false if it is unable to do the collection due to the
565 565 // GC locker being active, true otherwise
566 566 bool do_collection(bool explicit_gc,
567 567 bool clear_all_soft_refs,
568 568 size_t word_size);
569 569
570 570 // Callback from VM_G1CollectFull operation.
571 571 // Perform a full collection.
572 572 void do_full_collection(bool clear_all_soft_refs);
573 573
574 574 // Resize the heap if necessary after a full collection. If this is
575 575 // after a collect-for-allocation, "word_size" is the allocation size,
576 576 // and will be considered part of the used portion of the heap.
577 577 void resize_if_necessary_after_full_collection(size_t word_size);
578 578
579 579 // Callback from VM_G1CollectForAllocation operation.
580 580 // This function does everything necessary/possible to satisfy a
581 581 // failed allocation request (including collection, expansion, etc.)
582 582 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
583 583
584 584 // Attempt to expand the heap sufficiently
585 585 // to support an allocation of the given "word_size". If
586 586 // successful, perform the allocation and return the address of the
587 587 // allocated block, or else "NULL".
588 588 HeapWord* expand_and_allocate(size_t word_size);
589 589
590 590 // Process any reference objects discovered during
591 591 // an incremental evacuation pause.
592 592 void process_discovered_references();
593 593
594 594 // Enqueue any remaining discovered references
595 595 // after processing.
596 596 void enqueue_discovered_references();
597 597
598 598 public:
599 599
600 600 G1MonitoringSupport* g1mm() {
601 601 assert(_g1mm != NULL, "should have been initialized");
602 602 return _g1mm;
603 603 }
604 604
605 605 // Expand the garbage-first heap by at least the given size (in bytes!).
606 606 // Returns true if the heap was expanded by the requested amount;
607 607 // false otherwise.
608 608 // (Rounds up to a HeapRegion boundary.)
609 609 bool expand(size_t expand_bytes);
610 610
611 611 // Do anything common to GC's.
612 612 virtual void gc_prologue(bool full);
613 613 virtual void gc_epilogue(bool full);
614 614
615 615 // We register a region with the fast "in collection set" test. We
616 616 // simply set to true the array slot corresponding to this region.
617 617 void register_region_with_in_cset_fast_test(HeapRegion* r) {
618 618 assert(_in_cset_fast_test_base != NULL, "sanity");
619 619 assert(r->in_collection_set(), "invariant");
620 620 size_t index = r->hrs_index();
621 621 assert(index < _in_cset_fast_test_length, "invariant");
622 622 assert(!_in_cset_fast_test_base[index], "invariant");
623 623 _in_cset_fast_test_base[index] = true;
624 624 }
625 625
626 626 // This is a fast test on whether a reference points into the
627 627 // collection set or not. It does not assume that the reference
628 628 // points into the heap; if it doesn't, it will return false.
629 629 bool in_cset_fast_test(oop obj) {
630 630 assert(_in_cset_fast_test != NULL, "sanity");
631 631 if (_g1_committed.contains((HeapWord*) obj)) {
632 632 // no need to subtract the bottom of the heap from obj,
633 633 // _in_cset_fast_test is biased
634 634 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
635 635 bool ret = _in_cset_fast_test[index];
636 636 // let's make sure the result is consistent with what the slower
637 637 // test returns
638 638 assert( ret || !obj_in_cs(obj), "sanity");
639 639 assert(!ret || obj_in_cs(obj), "sanity");
640 640 return ret;
641 641 } else {
642 642 return false;
643 643 }
644 644 }
645 645
646 646 void clear_cset_fast_test() {
647 647 assert(_in_cset_fast_test_base != NULL, "sanity");
648 648 memset(_in_cset_fast_test_base, false,
649 649 _in_cset_fast_test_length * sizeof(bool));
650 650 }
651 651
652 652 // This is called at the end of either a concurrent cycle or a Full
653 653 // GC to update the number of full collections completed. Those two
654 654 // can happen in a nested fashion, i.e., we start a concurrent
655 655 // cycle, a Full GC happens half-way through it which ends first,
656 656 // and then the cycle notices that a Full GC happened and ends
657 657 // too. The concurrent parameter is a boolean to help us do a bit
658 658 // tighter consistency checking in the method. If concurrent is
659 659 // false, the caller is the inner caller in the nesting (i.e., the
660 660 // Full GC). If concurrent is true, the caller is the outer caller
661 661 // in this nesting (i.e., the concurrent cycle). Further nesting is
662 662 // not currently supported. The end of this call also notifies
663 663 // the FullGCCount_lock in case a Java thread is waiting for a full
664 664 // GC to happen (e.g., it called System.gc() with
665 665 // +ExplicitGCInvokesConcurrent).
666 666 void increment_full_collections_completed(bool concurrent);
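A rough sketch of the bookkeeping this implies (assumption only; the real method in g1CollectedHeap.cpp also performs the consistency checks mentioned above, driven by the concurrent flag):

    static void full_collection_completed_sketch(volatile unsigned int* completed) {
      MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      // The inner caller (a Full GC that interrupted a concurrent cycle)
      // finishes first; the outer caller (the cycle itself) bumps the count
      // again when it eventually ends.
      *completed += 1;
      // Wake any Java thread waiting for a full GC to complete, e.g. a
      // System.gc() call with +ExplicitGCInvokesConcurrent.
      ml.notify_all();
    }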
667 667
668 668 unsigned int full_collections_completed() {
669 669 return _full_collections_completed;
670 670 }
671 671
672 672 G1HRPrinter* hr_printer() { return &_hr_printer; }
673 673
674 674 protected:
675 675
676 676 // Shrink the garbage-first heap by at most the given size (in bytes!).
677 677 // (Rounds down to a HeapRegion boundary.)
678 678 virtual void shrink(size_t expand_bytes);
679 679 void shrink_helper(size_t expand_bytes);
680 680
681 681 #if TASKQUEUE_STATS
682 682 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
683 683 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
684 684 void reset_taskqueue_stats();
685 685 #endif // TASKQUEUE_STATS
686 686
687 687 // Schedule the VM operation that will do an evacuation pause to
688 688 // satisfy an allocation request of word_size. *succeeded will
689 689 // return whether the VM operation was successful (it did do an
690 690 // evacuation pause) or not (another thread beat us to it or the GC
691 691 // locker was active). Given that we should not be holding the
692 692 // Heap_lock when we enter this method, we will pass the
693 693 // gc_count_before (i.e., total_collections()) as a parameter since
694 694 // it has to be read while holding the Heap_lock. Currently, both
695 695 // methods that call do_collection_pause() release the Heap_lock
696 696 // before the call, so it's easy to read gc_count_before just before.
697 697 HeapWord* do_collection_pause(size_t word_size,
698 698 unsigned int gc_count_before,
699 699 bool* succeeded);
700 700
701 701 // The guts of the incremental collection pause, executed by the vm
702 702 // thread. It returns false if it is unable to do the collection due
703 703 // to the GC locker being active, true otherwise
704 704 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
705 705
706 706 // Actually do the work of evacuating the collection set.
707 707 void evacuate_collection_set();
708 708
709 709 // The g1 remembered set of the heap.
710 710 G1RemSet* _g1_rem_set;
711 711 // And its mod ref barrier set, used to track updates for the above.
712 712 ModRefBarrierSet* _mr_bs;
713 713
714 714 // A set of cards that cover the objects for which the Rsets should be updated
715 715 // concurrently after the collection.
716 716 DirtyCardQueueSet _dirty_card_queue_set;
717 717
718 718 // The Heap Region Rem Set Iterator.
719 719 HeapRegionRemSetIterator** _rem_set_iterator;
720 720
721 721 // The closure used to refine a single card.
722 722 RefineCardTableEntryClosure* _refine_cte_cl;
723 723
724 724 // A function to check the consistency of dirty card logs.
725 725 void check_ct_logs_at_safepoint();
726 726
727 727 // A DirtyCardQueueSet that is used to hold cards that contain
728 728 // references into the current collection set. This is used to
729 729 // update the remembered sets of the regions in the collection
730 730 // set in the event of an evacuation failure.
731 731 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
732 732
733 733 // After a collection pause, make the regions in the CS into free
734 734 // regions.
735 735 void free_collection_set(HeapRegion* cs_head);
736 736
737 737 // Abandon the current collection set without recording policy
738 738 // statistics or updating free lists.
739 739 void abandon_collection_set(HeapRegion* cs_head);
740 740
741 741 // Applies "scan_non_heap_roots" to roots outside the heap,
742 742 // "scan_rs" to roots inside the heap (having done "set_region" to
743 743 // indicate the region in which the root resides), and does "scan_perm"
744 744 // (setting the generation to the perm generation.) If "scan_rs" is
745 745 // NULL, then this step is skipped. The "worker_i"
746 746 // param is for use with parallel roots processing, and should be
747 747 // the "i" of the calling parallel worker thread's work(i) function.
748 748 // In the sequential case this param will be ignored.
749 749 void g1_process_strong_roots(bool collecting_perm_gen,
750 750 SharedHeap::ScanningOption so,
751 751 OopClosure* scan_non_heap_roots,
752 752 OopsInHeapRegionClosure* scan_rs,
753 753 OopsInGenClosure* scan_perm,
754 754 int worker_i);
755 755
756 756 // Apply "blk" to all the weak roots of the system. These include
757 757 // JNI weak roots, the code cache, system dictionary, symbol table,
758 758 // string table, and referents of reachable weak refs.
759 759 void g1_process_weak_roots(OopClosure* root_closure,
760 760 OopClosure* non_root_closure);
761 761
762 762 // Frees a non-humongous region by initializing its contents and
763 763 // adding it to the free list that's passed as a parameter (this is
764 764 // usually a local list which will be appended to the master free
765 765 // list later). The used bytes of freed regions are accumulated in
766 766 // pre_used. If par is true, the region's RSet will not be freed
767 767 // up. The assumption is that this will be done later.
768 768 void free_region(HeapRegion* hr,
769 769 size_t* pre_used,
770 770 FreeRegionList* free_list,
771 771 bool par);
772 772
773 773 // Frees a humongous region by collapsing it into individual regions
774 774 // and calling free_region() for each of them. The freed regions
775 775 // will be added to the free list that's passed as a parameter (this
776 776 // is usually a local list which will be appended to the master free
777 777 // list later). The used bytes of freed regions are accumulated in
778 778 // pre_used. If par is true, the region's RSet will not be freed
779 779 // up. The assumption is that this will be done later.
780 780 void free_humongous_region(HeapRegion* hr,
781 781 size_t* pre_used,
782 782 FreeRegionList* free_list,
783 783 HumongousRegionSet* humongous_proxy_set,
784 784 bool par);
785 785
786 786 // Notifies all the necessary spaces that the committed space has
787 787 // been updated (either expanded or shrunk). It should be called
788 788 // after _g1_storage is updated.
789 789 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
790 790
791 791 // The concurrent marker (and the thread it runs in.)
792 792 ConcurrentMark* _cm;
793 793 ConcurrentMarkThread* _cmThread;
794 794 bool _mark_in_progress;
795 795
796 796 // The concurrent refiner.
797 797 ConcurrentG1Refine* _cg1r;
798 798
799 799 // The parallel task queues
800 800 RefToScanQueueSet *_task_queues;
801 801
802 802 // True iff an evacuation has failed in the current collection.
803 803 bool _evacuation_failed;
804 804
805 805 // Set the attribute indicating whether evacuation has failed in the
806 806 // current collection.
807 807 void set_evacuation_failed(bool b) { _evacuation_failed = b; }
808 808
809 809 // Failed evacuations cause some logical from-space objects to have
810 810 // forwarding pointers to themselves. Reset them.
811 811 void remove_self_forwarding_pointers();
812 812
813 813 // When one is non-null, so is the other. Together, each pair holds
814 814 // an object with a preserved mark and that object's mark value.
815 815 GrowableArray<oop>* _objs_with_preserved_marks;
816 816 GrowableArray<markOop>* _preserved_marks_of_objs;
817 817
818 818 // Preserve the mark of "obj", if necessary, in preparation for its mark
819 819 // word being overwritten with a self-forwarding-pointer.
820 820 void preserve_mark_if_necessary(oop obj, markOop m);
821 821
822 822 // The stack of evac-failure objects left to be scanned.
823 823 GrowableArray<oop>* _evac_failure_scan_stack;
824 824 // The closure to apply to evac-failure objects.
825 825
826 826 OopsInHeapRegionClosure* _evac_failure_closure;
827 827 // Set the field above.
828 828 void
829 829 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
830 830 _evac_failure_closure = evac_failure_closure;
831 831 }
832 832
833 833 // Push "obj" on the scan stack.
834 834 void push_on_evac_failure_scan_stack(oop obj);
835 835 // Process scan stack entries until the stack is empty.
836 836 void drain_evac_failure_scan_stack();
837 837 // True iff an invocation of "drain_scan_stack" is in progress; to
838 838 // prevent unnecessary recursion.
839 839 bool _drain_in_progress;
840 840
841 841 // Do any necessary initialization for evacuation-failure handling.
842 842 // "cl" is the closure that will be used to process evac-failure
843 843 // objects.
844 844 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
845 845 // Do any necessary cleanup for evacuation-failure handling data
846 846 // structures.
847 847 void finalize_for_evac_failure();
848 848
849 849 // An attempt to evacuate "obj" has failed; take necessary steps.
850 850 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
851 - bool should_mark_root);
851 + bool should_mark_root,
852 + int worker_i);
852 853 void handle_evacuation_failure_common(oop obj, markOop m);
853 854
854 855 // ("Weak") Reference processing support.
855 856 //
856 857 // G1 has 2 instances of the reference processor class. One
857 858 // (_ref_processor_cm) handles reference object discovery
858 859 // and subsequent processing during concurrent marking cycles.
859 860 //
860 861 // The other (_ref_processor_stw) handles reference object
861 862 // discovery and processing during full GCs and incremental
862 863 // evacuation pauses.
863 864 //
864 865 // During an incremental pause, reference discovery will be
865 866 // temporarily disabled for _ref_processor_cm and will be
866 867 // enabled for _ref_processor_stw. At the end of the evacuation
867 868 // pause references discovered by _ref_processor_stw will be
868 869 // processed and discovery will be disabled. The previous
869 870 // setting for reference object discovery for _ref_processor_cm
870 871 // will be re-instated.
871 872 //
872 873 // At the start of marking:
873 874 // * Discovery by the CM ref processor is verified to be inactive
874 875 // and its discovered lists are empty.
875 876 // * Discovery by the CM ref processor is then enabled.
876 877 //
877 878 // At the end of marking:
878 879 // * Any references on the CM ref processor's discovered
879 880 // lists are processed (possibly MT).
880 881 //
881 882 // At the start of full GC we:
882 883 // * Disable discovery by the CM ref processor and
883 884 // empty CM ref processor's discovered lists
884 885 // (without processing any entries).
885 886 // * Verify that the STW ref processor is inactive and its
886 887 // discovered lists are empty.
887 888 // * Temporarily set STW ref processor discovery as single threaded.
888 889 // * Temporarily clear the STW ref processor's _is_alive_non_header
889 890 // field.
890 891 // * Finally enable discovery by the STW ref processor.
891 892 //
892 893 // The STW ref processor is used to record any discovered
893 894 // references during the full GC.
894 895 //
895 896 // At the end of a full GC we:
896 897 // * Enqueue any reference objects discovered by the STW ref processor
897 898 // that have non-live referents. This has the side-effect of
898 899 // making the STW ref processor inactive by disabling discovery.
899 900 // * Verify that the CM ref processor is still inactive
900 901 // and no references have been placed on its discovered
901 902 // lists (also checked as a precondition during initial marking).
902 903
903 904 // The (stw) reference processor...
904 905 ReferenceProcessor* _ref_processor_stw;
905 906
906 907 // During reference object discovery, the _is_alive_non_header
907 908 // closure (if non-null) is applied to the referent object to
908 909 // determine whether the referent is live. If so then the
909 910 // reference object does not need to be 'discovered' and can
910 911 // be treated as a regular oop. This has the benefit of reducing
911 912 // the number of 'discovered' reference objects that need to
912 913 // be processed.
913 914 //
914 915 // Instance of the is_alive closure for embedding into the
915 916 // STW reference processor as the _is_alive_non_header field.
916 917 // Supplying a value for the _is_alive_non_header field is
917 918 // optional but doing so prevents unnecessary additions to
918 919 // the discovered lists during reference discovery.
919 920 G1STWIsAliveClosure _is_alive_closure_stw;
920 921
921 922 // The (concurrent marking) reference processor...
922 923 ReferenceProcessor* _ref_processor_cm;
923 924
924 925 // Instance of the concurrent mark is_alive closure for embedding
925 926 // into the Concurrent Marking reference processor as the
926 927 // _is_alive_non_header field. Supplying a value for the
927 928 // _is_alive_non_header field is optional but doing so prevents
928 929 // unnecessary additions to the discovered lists during reference
929 930 // discovery.
930 931 G1CMIsAliveClosure _is_alive_closure_cm;
931 932
932 933 enum G1H_process_strong_roots_tasks {
933 934 G1H_PS_mark_stack_oops_do,
934 935 G1H_PS_refProcessor_oops_do,
935 936 // Leave this one last.
936 937 G1H_PS_NumElements
937 938 };
938 939
939 940 SubTasksDone* _process_strong_tasks;
940 941
941 942 volatile bool _free_regions_coming;
942 943
943 944 public:
944 945
945 946 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
946 947
947 948 void set_refine_cte_cl_concurrency(bool concurrent);
948 949
949 950 RefToScanQueue *task_queue(int i) const;
950 951
951 952 // A set of cards where updates happened during the GC
952 953 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
953 954
954 955 // A DirtyCardQueueSet that is used to hold cards that contain
955 956 // references into the current collection set. This is used to
956 957 // update the remembered sets of the regions in the collection
957 958 // set in the event of an evacuation failure.
958 959 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
959 960 { return _into_cset_dirty_card_queue_set; }
960 961
961 962 // Create a G1CollectedHeap with the specified policy.
962 963 // Must call the initialize method afterwards.
963 964 // May not return if something goes wrong.
964 965 G1CollectedHeap(G1CollectorPolicy* policy);
965 966
966 967 // Initialize the G1CollectedHeap to have the initial and
967 968 // maximum sizes, permanent generation, and remembered and barrier sets
968 969 // specified by the policy object.
969 970 jint initialize();
970 971
971 972 // Initialize weak reference processing.
972 973 virtual void ref_processing_init();
973 974
974 975 void set_par_threads(int t) {
975 976 SharedHeap::set_par_threads(t);
976 977 _process_strong_tasks->set_n_threads(t);
977 978 }
978 979
979 980 virtual CollectedHeap::Name kind() const {
980 981 return CollectedHeap::G1CollectedHeap;
981 982 }
982 983
983 984 // The current policy object for the collector.
984 985 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
985 986
986 987 // Adaptive size policy. No such thing for g1.
987 988 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
988 989
989 990 // The rem set and barrier set.
990 991 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
991 992 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
992 993
993 994 // The rem set iterator.
994 995 HeapRegionRemSetIterator* rem_set_iterator(int i) {
995 996 return _rem_set_iterator[i];
996 997 }
997 998
998 999 HeapRegionRemSetIterator* rem_set_iterator() {
999 1000 return _rem_set_iterator[0];
1000 1001 }
1001 1002
1002 1003 unsigned get_gc_time_stamp() {
1003 1004 return _gc_time_stamp;
1004 1005 }
1005 1006
1006 1007 void reset_gc_time_stamp() {
1007 1008 _gc_time_stamp = 0;
1008 1009 OrderAccess::fence();
1009 1010 }
1010 1011
1011 1012 void increment_gc_time_stamp() {
1012 1013 ++_gc_time_stamp;
1013 1014 OrderAccess::fence();
1014 1015 }
1015 1016
1016 1017 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1017 1018 DirtyCardQueue* into_cset_dcq,
1018 1019 bool concurrent, int worker_i);
1019 1020
1020 1021 // The shared block offset table array.
1021 1022 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1022 1023
1023 1024 // Reference Processing accessors
1024 1025
1025 1026 // The STW reference processor....
1026 1027 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1027 1028
1028 1029 // The Concurrent Marking reference processor...
1029 1030 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1030 1031
1031 1032 virtual size_t capacity() const;
1032 1033 virtual size_t used() const;
1033 1034 // This should be called when we're not holding the heap lock. The
1034 1035 // result might be a bit inaccurate.
1035 1036 size_t used_unlocked() const;
1036 1037 size_t recalculate_used() const;
1037 1038
1038 1039 // These virtual functions do the actual allocation.
1039 1040 // Some heaps may offer a contiguous region for shared non-blocking
1040 1041 // allocation, via inlined code (by exporting the address of the top and
1041 1042 // end fields defining the extent of the contiguous allocation region.)
1042 1043 // But G1CollectedHeap doesn't yet support this.
1043 1044
1044 1045 // Return an estimate of the maximum allocation that could be performed
1045 1046 // without triggering any collection or expansion activity. In a
1046 1047 // generational collector, for example, this is probably the largest
1047 1048 // allocation that could be supported (without expansion) in the youngest
1048 1049 // generation. It is "unsafe" because no locks are taken; the result
1049 1050 // should be treated as an approximation, not a guarantee, for use in
1050 1051 // heuristic resizing decisions.
1051 1052 virtual size_t unsafe_max_alloc();
1052 1053
1053 1054 virtual bool is_maximal_no_gc() const {
1054 1055 return _g1_storage.uncommitted_size() == 0;
1055 1056 }
1056 1057
1057 1058 // The total number of regions in the heap.
1058 1059 size_t n_regions() { return _hrs.length(); }
1059 1060
1060 1061 // The max number of regions in the heap.
1061 1062 size_t max_regions() { return _hrs.max_length(); }
1062 1063
1063 1064 // The number of regions that are completely free.
1064 1065 size_t free_regions() { return _free_list.length(); }
1065 1066
1066 1067 // The number of regions that are not completely free.
1067 1068 size_t used_regions() { return n_regions() - free_regions(); }
1068 1069
1069 1070 // The number of regions available for "regular" expansion.
1070 1071 size_t expansion_regions() { return _expansion_regions; }
1071 1072
1072 1073 // Factory method for HeapRegion instances. It will return NULL if
1073 1074 // the allocation fails.
1074 1075 HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
1075 1076
1076 1077 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1077 1078 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1078 1079 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1079 1080 void verify_dirty_young_regions() PRODUCT_RETURN;
1080 1081
1081 1082 // verify_region_sets() performs verification over the region
1082 1083 // lists. It will be compiled in the product code to be used when
1083 1084 // necessary (i.e., during heap verification).
1084 1085 void verify_region_sets();
1085 1086
1086 1087 // verify_region_sets_optional() is planted in the code for
1087 1088 // list verification in non-product builds (and it can be enabled in
1088 1089 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1089 1090 #if HEAP_REGION_SET_FORCE_VERIFY
1090 1091 void verify_region_sets_optional() {
1091 1092 verify_region_sets();
1092 1093 }
1093 1094 #else // HEAP_REGION_SET_FORCE_VERIFY
1094 1095 void verify_region_sets_optional() { }
1095 1096 #endif // HEAP_REGION_SET_FORCE_VERIFY
1096 1097
1097 1098 #ifdef ASSERT
1098 1099 bool is_on_master_free_list(HeapRegion* hr) {
1099 1100 return hr->containing_set() == &_free_list;
1100 1101 }
1101 1102
1102 1103 bool is_in_humongous_set(HeapRegion* hr) {
1103 1104 return hr->containing_set() == &_humongous_set;
1104 1105 }
1105 1106 #endif // ASSERT
1106 1107
1107 1108 // Wrapper for the region list operations that can be called from
1108 1109 // methods outside this class.
1109 1110
1110 1111 void secondary_free_list_add_as_tail(FreeRegionList* list) {
1111 1112 _secondary_free_list.add_as_tail(list);
1112 1113 }
1113 1114
1114 1115 void append_secondary_free_list() {
1115 1116 _free_list.add_as_head(&_secondary_free_list);
1116 1117 }
1117 1118
1118 1119 void append_secondary_free_list_if_not_empty_with_lock() {
1119 1120 // If the secondary free list looks empty there's no reason to
1120 1121 // take the lock and then try to append it.
1121 1122 if (!_secondary_free_list.is_empty()) {
1122 1123 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1123 1124 append_secondary_free_list();
1124 1125 }
1125 1126 }
1126 1127
1127 1128 void set_free_regions_coming();
1128 1129 void reset_free_regions_coming();
1129 1130 bool free_regions_coming() { return _free_regions_coming; }
1130 1131 void wait_while_free_regions_coming();
1131 1132
1132 1133 // Perform a collection of the heap; intended for use in implementing
1133 1134 // "System.gc". This probably implies as full a collection as the
1134 1135 // "CollectedHeap" supports.
1135 1136 virtual void collect(GCCause::Cause cause);
1136 1137
1137 1138 // The same as above but assume that the caller holds the Heap_lock.
1138 1139 void collect_locked(GCCause::Cause cause);
1139 1140
1140 1141 // This interface assumes that it's being called by the
1141 1142 // vm thread. It collects the heap assuming that the
1142 1143 // heap lock is already held and that we are executing in
1143 1144 // the context of the vm thread.
1144 1145 virtual void collect_as_vm_thread(GCCause::Cause cause);
1145 1146
1146 1147 // True iff an evacuation has failed in the most-recent collection.
1147 1148 bool evacuation_failed() { return _evacuation_failed; }
1148 1149
1149 1150 // It will free a region if it has allocated objects in it that are
1150 1151 // all dead. It calls either free_region() or
1151 1152 // free_humongous_region() depending on the type of the region that
1152 1153 // is passed to it.
1153 1154 void free_region_if_empty(HeapRegion* hr,
1154 1155 size_t* pre_used,
1155 1156 FreeRegionList* free_list,
1156 1157 HumongousRegionSet* humongous_proxy_set,
1157 1158 HRRSCleanupTask* hrrs_cleanup_task,
1158 1159 bool par);
1159 1160
1160 1161 // It appends the free list to the master free list and updates the
1161 1162 // master humongous list according to the contents of the proxy
1162 1163 // list. It also adjusts the total used bytes according to pre_used
1163 1164 // (if par is true, it will do so by taking the ParGCRareEvent_lock).
1164 1165 void update_sets_after_freeing_regions(size_t pre_used,
1165 1166 FreeRegionList* free_list,
1166 1167 HumongousRegionSet* humongous_proxy_set,
1167 1168 bool par);
1168 1169
1169 1170 // Returns "TRUE" iff "p" points into the allocated area of the heap.
1170 1171 virtual bool is_in(const void* p) const;
1171 1172
1172 1173 // Return "TRUE" iff the given object address is within the collection
1173 1174 // set.
1174 1175 inline bool obj_in_cs(oop obj);
1175 1176
1176 1177 // Return "TRUE" iff the given object address is in the reserved
1177 1178 // region of g1 (excluding the permanent generation).
1178 1179 bool is_in_g1_reserved(const void* p) const {
1179 1180 return _g1_reserved.contains(p);
1180 1181 }
1181 1182
1182 1183 // Returns a MemRegion that corresponds to the space that has been
1183 1184 // reserved for the heap
1184 1185 MemRegion g1_reserved() {
1185 1186 return _g1_reserved;
1186 1187 }
1187 1188
1188 1189 // Returns a MemRegion that corresponds to the space that has been
1189 1190 // committed in the heap
1190 1191 MemRegion g1_committed() {
1191 1192 return _g1_committed;
1192 1193 }
1193 1194
1194 1195 virtual bool is_in_closed_subset(const void* p) const;
1195 1196
1196 1197 // This resets the card table to all zeros. It is used after
1197 1198 // a collection pause which used the card table to claim cards.
1198 1199 void cleanUpCardTable();
1199 1200
1200 1201 // Iteration functions.
1201 1202
1202 1203 // Iterate over all the ref-containing fields of all objects, calling
1203 1204 // "cl.do_oop" on each.
1204 1205 virtual void oop_iterate(OopClosure* cl) {
1205 1206 oop_iterate(cl, true);
1206 1207 }
1207 1208 void oop_iterate(OopClosure* cl, bool do_perm);
1208 1209
1209 1210 // Same as above, restricted to a memory region.
1210 1211 virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
1211 1212 oop_iterate(mr, cl, true);
1212 1213 }
1213 1214 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
1214 1215
1215 1216 // Iterate over all objects, calling "cl.do_object" on each.
1216 1217 virtual void object_iterate(ObjectClosure* cl) {
1217 1218 object_iterate(cl, true);
1218 1219 }
1219 1220 virtual void safe_object_iterate(ObjectClosure* cl) {
1220 1221 object_iterate(cl, true);
1221 1222 }
1222 1223 void object_iterate(ObjectClosure* cl, bool do_perm);
1223 1224
1224 1225 // Iterate over all objects allocated since the last collection, calling
1225 1226 // "cl.do_object" on each. The heap must have been initialized properly
1226 1227 // to support this function, or else this call will fail.
1227 1228 virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1228 1229
1229 1230 // Iterate over all spaces in use in the heap, in ascending address order.
1230 1231 virtual void space_iterate(SpaceClosure* cl);
1231 1232
1232 1233 // Iterate over heap regions, in address order, terminating the
1233 1234 // iteration early if the "doHeapRegion" method returns "true".
1234 1235 void heap_region_iterate(HeapRegionClosure* blk) const;
1235 1236
1236 1237 // Iterate over heap regions starting with r (or the first region if "r"
1237 1238 // is NULL), in address order, terminating early if the "doHeapRegion"
1238 1239 // method returns "true".
1239 1240 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
1240 1241
1241 1242 // Return the region with the given index. It assumes the index is valid.
1242 1243 HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
1243 1244
1244 1245 // Divide the heap region sequence into "chunks" of some size (the number
1245 1246 // of regions divided by the number of parallel threads times some
1246 1247 // overpartition factor, currently 4). Assumes that this will be called
1247 1248 // in parallel by ParallelGCThreads worker threads with distinct worker
1248 1249 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1249 1250 // calls will use the same "claim_value", and that that claim value is
1250 1251 // different from the claim_value of any heap region before the start of
1251 1252 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1252 1253 // attempting to claim the first region in each chunk, and, if
1253 1254 // successful, applying the closure to each region in the chunk (and
1254 1255 // setting the claim value of the second and subsequent regions of the
1255 1256 // chunk.) For now requires that "doHeapRegion" always returns "false",
1256 1257 // i.e., that a closure never attempt to abort a traversal.
1257 1258 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1258 1259 int worker,
1259 1260 jint claim_value);
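
The claim-value protocol described above can be pictured with a small standalone sketch: each worker walks the region array in chunks and only processes a chunk if it wins the compare-and-swap on the chunk's first region. The Region type, worker counts, and chunk arithmetic below are placeholders for illustration, not the G1 types.

    #include <algorithm>
    #include <atomic>
    #include <cstdio>
    #include <functional>
    #include <thread>
    #include <vector>

    // Illustrative stand-in for a heap region: just an index plus a claim value.
    struct Region {
      std::atomic<int> claim;
      int index;
    };

    // Each worker walks the region array in fixed-size chunks and only
    // processes a chunk if it wins the CAS on the chunk's first region,
    // mirroring the "claim the first region" rule described above.
    void par_iterate_chunked(std::vector<Region>& regions, int worker,
                             int n_workers, int claim_value) {
      const int overpartition = 4;
      int n = (int)regions.size();
      int chunk = std::max(1, n / (n_workers * overpartition));
      for (int start = 0; start < n; start += chunk) {
        int expected = 0;   // 0 == unclaimed
        if (regions[start].claim.compare_exchange_strong(expected, claim_value)) {
          int end = std::min(start + chunk, n);
          for (int i = start; i < end; i++) {
            regions[i].claim.store(claim_value);  // tag the rest of the chunk
            std::printf("worker %d processes region %d\n", worker, i);
          }
        }
      }
    }

    int main() {
      std::vector<Region> regions(32);
      for (int i = 0; i < 32; i++) { regions[i].claim = 0; regions[i].index = i; }
      std::vector<std::thread> workers;
      for (int w = 0; w < 4; w++) {
        workers.emplace_back(par_iterate_chunked, std::ref(regions), w, 4, 1);
      }
      for (auto& t : workers) { t.join(); }
      return 0;
    }
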
1260 1261
1261 1262 // It resets all the region claim values to the default.
1262 1263 void reset_heap_region_claim_values();
1263 1264
1264 1265 #ifdef ASSERT
1265 1266 bool check_heap_region_claim_values(jint claim_value);
1266 1267 #endif // ASSERT
1267 1268
1268 1269 // Iterate over the regions (if any) in the current collection set.
1269 1270 void collection_set_iterate(HeapRegionClosure* blk);
1270 1271
1271 1272 // As above but starting from region r
1272 1273 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1273 1274
1274 1275 // Returns the first (lowest address) compactible space in the heap.
1275 1276 virtual CompactibleSpace* first_compactible_space();
1276 1277
1277 1278 // A CollectedHeap will contain some number of spaces. This finds the
1278 1279 // space containing a given address, or else returns NULL.
1279 1280 virtual Space* space_containing(const void* addr) const;
1280 1281
1281 1282 // A G1CollectedHeap will contain some number of heap regions. This
1282 1283 // finds the region containing a given address, or else returns NULL.
1283 1284 template <class T>
1284 1285 inline HeapRegion* heap_region_containing(const T addr) const;
1285 1286
1286 1287 // Like the above, but requires "addr" to be in the heap (to avoid a
1287 1288 // null-check), and unlike the above, may return a continuing humongous
1288 1289 // region.
1289 1290 template <class T>
1290 1291 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1291 1292
1292 1293 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1293 1294 // each address in the (reserved) heap is a member of exactly
1294 1295 // one block. The defining characteristic of a block is that it is
1295 1296 // possible to find its size, and thus to progress forward to the next
1296 1297 // block. (Blocks may be of different sizes.) Thus, blocks may
1297 1298 // represent Java objects, or they might be free blocks in a
1298 1299 // free-list-based heap (or subheap), as long as the two kinds are
1299 1300 // distinguishable and the size of each is determinable.
1300 1301
1301 1302 // Returns the address of the start of the "block" that contains the
1302 1303 // address "addr". We say "blocks" instead of "object" since some heaps
1303 1304 // may not pack objects densely; a chunk may either be an object or a
1304 1305 // non-object.
1305 1306 virtual HeapWord* block_start(const void* addr) const;
1306 1307
1307 1308 // Requires "addr" to be the start of a chunk, and returns its size.
1308 1309 // "addr + size" is required to be the start of a new chunk, or the end
1309 1310 // of the active area of the heap.
1310 1311 virtual size_t block_size(const HeapWord* addr) const;
1311 1312
1312 1313 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1313 1314 // the block is an object.
1314 1315 virtual bool block_is_obj(const HeapWord* addr) const;
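
As a concrete, if toy, picture of the block contract above, the following standalone sketch walks a dense sequence of variable-sized blocks by repeatedly advancing by the block size. ToyBlock and the sizes are illustrative, not HotSpot types.

    #include <cstdio>
    #include <cstddef>
    #include <vector>

    // A toy "space": a dense sequence of variable-sized blocks, each of which
    // is either an object or a free (non-object) chunk.
    struct ToyBlock { size_t size_in_words; bool is_obj; };

    int main() {
      std::vector<ToyBlock> space = { {8, true}, {16, false}, {4, true} };
      size_t addr = 0;  // word offset standing in for a HeapWord*
      for (const ToyBlock& b : space) {
        std::printf("block @ %zu: %zu words, %s\n",
                    addr, b.size_in_words, b.is_obj ? "object" : "non-object");
        addr += b.size_in_words;  // block_size(addr) tells us where the next block starts
      }
      return 0;
    }
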
1315 1316
1316 1317 // Does this heap support heap inspection? (+PrintClassHistogram)
1317 1318 virtual bool supports_heap_inspection() const { return true; }
1318 1319
1319 1320 // Section on thread-local allocation buffers (TLABs)
1320 1321 // See CollectedHeap for semantics.
1321 1322
1322 1323 virtual bool supports_tlab_allocation() const;
1323 1324 virtual size_t tlab_capacity(Thread* thr) const;
1324 1325 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1325 1326
1326 1327 // Can a compiler initialize a new object without store barriers?
1327 1328 // This permission only extends from the creation of a new object
1328 1329 // via a TLAB up to the first subsequent safepoint. If such permission
1329 1330 // is granted for this heap type, the compiler promises to call
1330 1331 // defer_store_barrier() below on any slow path allocation of
1331 1332 // a new object for which such initializing store barriers will
1332 1333 // have been elided. G1, like CMS, allows this, but should be
1333 1334 // ready to provide a compensating write barrier as necessary
1334 1335 // if that storage came out of a non-young region. The efficiency
1335 1336 // of this implementation depends crucially on being able to
1336 1337 // answer very efficiently in constant time whether a piece of
1337 1338 // storage in the heap comes from a young region or not.
1338 1339 // See ReduceInitialCardMarks.
1339 1340 virtual bool can_elide_tlab_store_barriers() const {
1340 1341 // 6920090: Temporarily disabled, because of lingering
1341 1342 // instabilities related to RICM with G1. In the
1342 1343 // interim, the option ReduceInitialCardMarksForG1
1343 1344 // below is left solely as a debugging device at least
1344 1345 // until 6920109 fixes the instabilities.
1345 1346 return ReduceInitialCardMarksForG1;
1346 1347 }
1347 1348
1348 1349 virtual bool card_mark_must_follow_store() const {
1349 1350 return true;
1350 1351 }
1351 1352
1352 1353 bool is_in_young(const oop obj) {
1353 1354 HeapRegion* hr = heap_region_containing(obj);
1354 1355 return hr != NULL && hr->is_young();
1355 1356 }
1356 1357
1357 1358 #ifdef ASSERT
1358 1359 virtual bool is_in_partial_collection(const void* p);
1359 1360 #endif
1360 1361
1361 1362 virtual bool is_scavengable(const void* addr);
1362 1363
1363 1364 // We don't need barriers for initializing stores to objects
1364 1365 // in the young gen: for the SATB pre-barrier, there is no
1365 1366 // pre-value that needs to be remembered; for the remembered-set
1366 1367 // update logging post-barrier, we don't maintain remembered set
1367 1368 // information for young gen objects.
1368 1369 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1369 1370 // Re 6920090, 6920109 above.
1370 1371 assert(ReduceInitialCardMarksForG1, "Else cannot be here");
1371 1372 return is_in_young(new_obj);
1372 1373 }
1373 1374
1374 1375 // Can a compiler elide a store barrier when it writes
1375 1376 // a permanent oop into the heap? Applies when the compiler
1376 1377 // is storing x to the heap, where x->is_perm() is true.
1377 1378 virtual bool can_elide_permanent_oop_store_barriers() const {
1378 1379 // At least until perm gen collection is also G1-ified, at
1379 1380 // which point this should return false.
1380 1381 return true;
1381 1382 }
1382 1383
1383 1384 // Returns "true" iff the given word_size is "very large".
1384 1385 static bool isHumongous(size_t word_size) {
1385 1386 // Note this has to be strictly greater-than as the TLABs
1386 1387 // are capped at the humongous threshold and we want to
1387 1388 // ensure that we don't try to allocate a TLAB as
1388 1389 // humongous and that we don't allocate a humongous
1389 1390 // object in a TLAB.
1390 1391 return word_size > _humongous_object_threshold_in_words;
1391 1392 }
1392 1393
1393 1394 // Update mod union table with the set of dirty cards.
1394 1395 void updateModUnion();
1395 1396
1396 1397 // Set the mod union bits corresponding to the given memRegion. Note
1397 1398 // that this is always a safe operation, since it doesn't clear any
1398 1399 // bits.
1399 1400 void markModUnionRange(MemRegion mr);
1400 1401
1401 1402 // Records the fact that a marking phase is no longer in progress.
1402 1403 void set_marking_complete() {
1403 1404 _mark_in_progress = false;
1404 1405 }
1405 1406 void set_marking_started() {
1406 1407 _mark_in_progress = true;
1407 1408 }
1408 1409 bool mark_in_progress() {
1409 1410 return _mark_in_progress;
1410 1411 }
1411 1412
1412 1413 // Print the maximum heap capacity.
1413 1414 virtual size_t max_capacity() const;
1414 1415
1415 1416 virtual jlong millis_since_last_gc();
1416 1417
1417 1418 // Perform any cleanup actions necessary before allowing a verification.
1418 1419 virtual void prepare_for_verify();
1419 1420
1420 1421 // Perform verification.
1421 1422
1422 1423 // vo == UsePrevMarking -> use "prev" marking information,
1423 1424 // vo == UseNextMarking -> use "next" marking information
1424 1425 // vo == UseMarkWord -> use the mark word in the object header
1425 1426 //
1426 1427 // NOTE: Only the "prev" marking information is guaranteed to be
1427 1428 // consistent most of the time, so most calls to this should use
1428 1429 // vo == UsePrevMarking.
1429 1430 // Currently, there is only one case where this is called with
1430 1431 // vo == UseNextMarking, which is to verify the "next" marking
1431 1432 // information at the end of remark.
1432 1433 // Currently there is only one place where this is called with
1433 1434 // vo == UseMarkWord, which is to verify the marking during a
1434 1435 // full GC.
1435 1436 void verify(bool allow_dirty, bool silent, VerifyOption vo);
1436 1437
1437 1438 // Override; it uses the "prev" marking information
1438 1439 virtual void verify(bool allow_dirty, bool silent);
1439 1440 // Default behavior by calling print(tty);
1440 1441 virtual void print() const;
1441 1442 // This calls print_on(st, PrintHeapAtGCExtended).
1442 1443 virtual void print_on(outputStream* st) const;
1443 1444 // If extended is true, it will print out information for all
1444 1445 // regions in the heap by calling print_on_extended(st).
1445 1446 virtual void print_on(outputStream* st, bool extended) const;
1446 1447 virtual void print_on_extended(outputStream* st) const;
1447 1448
1448 1449 virtual void print_gc_threads_on(outputStream* st) const;
1449 1450 virtual void gc_threads_do(ThreadClosure* tc) const;
1450 1451
1451 1452 // Override
1452 1453 void print_tracing_info() const;
1453 1454
1454 1455 // The following two methods are helpful for debugging RSet issues.
1455 1456 void print_cset_rsets() PRODUCT_RETURN;
1456 1457 void print_all_rsets() PRODUCT_RETURN;
1457 1458
1458 1459 // Convenience function to be used in situations where the heap type can be
1459 1460 // asserted to be this type.
1460 1461 static G1CollectedHeap* heap();
1461 1462
1462 1463 void empty_young_list();
1463 1464
1464 1465 void set_region_short_lived_locked(HeapRegion* hr);
1465 1466 // add appropriate methods for any other surv rate groups
1466 1467
1467 1468 YoungList* young_list() { return _young_list; }
1468 1469
1469 1470 // debugging
1470 1471 bool check_young_list_well_formed() {
1471 1472 return _young_list->check_list_well_formed();
1472 1473 }
1473 1474
1474 1475 bool check_young_list_empty(bool check_heap,
1475 1476 bool check_sample = true);
1476 1477
1477 1478 // *** Stuff related to concurrent marking. It's not clear to me that so
1478 1479 // many of these need to be public.
1479 1480
1480 1481 // The functions below are helper functions that a subclass of
1481 1482 // "CollectedHeap" can use in the implementation of its virtual
1482 1483 // functions.
1483 1484 // This performs a concurrent marking of the live objects in a
1484 1485 // bitmap off to the side.
1485 1486 void doConcurrentMark();
1486 1487
1487 1488 bool isMarkedPrev(oop obj) const;
1488 1489 bool isMarkedNext(oop obj) const;
1489 1490
1490 1491 // vo == UsePrevMarking -> use "prev" marking information,
1491 1492 // vo == UseNextMarking -> use "next" marking information,
1492 1493 // vo == UseMarkWord -> use mark word from object header
1493 1494 bool is_obj_dead_cond(const oop obj,
1494 1495 const HeapRegion* hr,
1495 1496 const VerifyOption vo) const {
1496 1497
1497 1498 switch (vo) {
1498 1499 case VerifyOption_G1UsePrevMarking:
1499 1500 return is_obj_dead(obj, hr);
1500 1501 case VerifyOption_G1UseNextMarking:
1501 1502 return is_obj_ill(obj, hr);
1502 1503 default:
1503 1504 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1504 1505 return !obj->is_gc_marked();
1505 1506 }
1506 1507 }
1507 1508
1508 1509 // Determine if an object is dead, given the object and also
1509 1510 // the region to which the object belongs. An object is dead
1510 1511 // iff a) it was not allocated since the last mark and b) it
1511 1512 // is not marked.
1512 1513
1513 1514 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1514 1515 return
1515 1516 !hr->obj_allocated_since_prev_marking(obj) &&
1516 1517 !isMarkedPrev(obj);
1517 1518 }
1518 1519
1519 1520 // This is used when copying an object to survivor space.
1520 1521 // If the object is marked live, then we mark the copy live.
1521 1522 // If the object is allocated since the start of this mark
1522 1523 // cycle, then we mark the copy live.
1523 1524 // If the object has been around since the previous mark
1524 1525 // phase, and hasn't been marked yet during this phase,
1525 1526 // then we don't mark it, we just wait for the
1526 1527 // current marking cycle to get to it.
1527 1528
1528 1529 // This function returns true when an object has been
1529 1530 // around since the previous marking and hasn't yet
1530 1531 // been marked during this marking.
1531 1532
1532 1533 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1533 1534 return
1534 1535 !hr->obj_allocated_since_next_marking(obj) &&
1535 1536 !isMarkedNext(obj);
1536 1537 }
1537 1538
1538 1539 // Determine if an object is dead, given only the object itself.
1539 1540 // This will find the region to which the object belongs and
1540 1541 // then call the region version of the same function.
1541 1542
1542 1543 // Note that if it is in the permanent gen it isn't dead.
1543 1544 // Note that if it is NULL it isn't dead.
1544 1545
1545 1546 // vo == UsePrevMarking -> use "prev" marking information,
1546 1547 // vo == UseNextMarking -> use "next" marking information,
1547 1548 // vo == UseMarkWord -> use mark word from object header
1548 1549 bool is_obj_dead_cond(const oop obj,
1549 1550 const VerifyOption vo) const {
1550 1551
1551 1552 switch (vo) {
1552 1553 case VerifyOption_G1UsePrevMarking:
1553 1554 return is_obj_dead(obj);
1554 1555 case VerifyOption_G1UseNextMarking:
1555 1556 return is_obj_ill(obj);
1556 1557 default:
1557 1558 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1558 1559 return !obj->is_gc_marked();
1559 1560 }
1560 1561 }
1561 1562
1562 1563 bool is_obj_dead(const oop obj) const {
1563 1564 const HeapRegion* hr = heap_region_containing(obj);
1564 1565 if (hr == NULL) {
1565 1566 if (Universe::heap()->is_in_permanent(obj))
1566 1567 return false;
1567 1568 else if (obj == NULL) return false;
1568 1569 else return true;
1569 1570 }
1570 1571 else return is_obj_dead(obj, hr);
1571 1572 }
1572 1573
1573 1574 bool is_obj_ill(const oop obj) const {
1574 1575 const HeapRegion* hr = heap_region_containing(obj);
1575 1576 if (hr == NULL) {
1576 1577 if (Universe::heap()->is_in_permanent(obj))
1577 1578 return false;
1578 1579 else if (obj == NULL) return false;
1579 1580 else return true;
1580 1581 }
1581 1582 else return is_obj_ill(obj, hr);
1582 1583 }
1583 1584
1584 1585 // The following is just to alert the verification code
1585 1586 // that a full collection has occurred and that the
1586 1587 // remembered sets are no longer up to date.
1587 1588 bool _full_collection;
1588 1589 void set_full_collection() { _full_collection = true;}
1589 1590 void clear_full_collection() {_full_collection = false;}
1590 1591 bool full_collection() {return _full_collection;}
1591 1592
1592 1593 ConcurrentMark* concurrent_mark() const { return _cm; }
1593 1594 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1594 1595
1595 1596 // The dirty cards region list is used to record a subset of regions
1596 1597 // whose cards need clearing. The list is populated during the
1597 1598 // remembered set scanning and drained during the card table
1598 1599 // cleanup. Although the methods are reentrant, population/draining
1599 1600 // phases must not overlap. For synchronization purposes the last
1600 1601 // element on the list points to itself.
1601 1602 HeapRegion* _dirty_cards_region_list;
1602 1603 void push_dirty_cards_region(HeapRegion* hr);
1603 1604 HeapRegion* pop_dirty_cards_region();
1604 1605
1605 1606 public:
1606 1607 void stop_conc_gc_threads();
1607 1608
1608 1609 // <NEW PREDICTION>
1609 1610
1610 1611 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
1611 1612 void check_if_region_is_too_expensive(double predicted_time_ms);
1612 1613 size_t pending_card_num();
1613 1614 size_t max_pending_card_num();
1614 1615 size_t cards_scanned();
1615 1616
1616 1617 // </NEW PREDICTION>
1617 1618
1618 1619 protected:
1619 1620 size_t _max_heap_capacity;
1620 1621 };
1621 1622
1622 1623 #define use_local_bitmaps 1
1623 1624 #define verify_local_bitmaps 0
1624 1625 #define oop_buffer_length 256
1625 1626
1626 1627 #ifndef PRODUCT
1627 1628 class GCLabBitMap;
1628 1629 class GCLabBitMapClosure: public BitMapClosure {
1629 1630 private:
1630 1631 ConcurrentMark* _cm;
1631 1632 GCLabBitMap* _bitmap;
1632 1633
1633 1634 public:
1634 1635 GCLabBitMapClosure(ConcurrentMark* cm,
1635 1636 GCLabBitMap* bitmap) {
1636 1637 _cm = cm;
1637 1638 _bitmap = bitmap;
1638 1639 }
1639 1640
1640 1641 virtual bool do_bit(size_t offset);
1641 1642 };
1642 1643 #endif // !PRODUCT
1643 1644
1644 1645 class GCLabBitMap: public BitMap {
1645 1646 private:
1646 1647 ConcurrentMark* _cm;
1647 1648
1648 1649 int _shifter;
1649 1650 size_t _bitmap_word_covers_words;
1650 1651
1651 1652 // beginning of the heap
1652 1653 HeapWord* _heap_start;
1653 1654
1654 1655 // this is the actual start of the GCLab
1655 1656 HeapWord* _real_start_word;
1656 1657
1657 1658 // this is the actual end of the GCLab
1658 1659 HeapWord* _real_end_word;
1659 1660
1660 1661 // this is the first word, possibly located before the actual start
1661 1662 // of the GCLab, that corresponds to the first bit of the bitmap
1662 1663 HeapWord* _start_word;
1663 1664
1664 1665 // size of a GCLab in words
1665 1666 size_t _gclab_word_size;
1666 1667
1668 + // The size of the marked objects in this bitmap
1669 + size_t _marked_bytes;
1670 +
1671 + // Card bitmap associated with cards spanned by the live objects.
1672 + // It's sized for _gclab_word_size _plus_ 1 additional card
1673 + // in case the lab does not start at a card boundary
1674 + BitMap _card_bm;
1675 +
1676 + // Card number of the base of the LAB. Used to encode
1677 + // indices into the bitmap above.
1678 + intptr_t _bottom_card_num;
1679 +
1667 1680 static int shifter() {
1668 1681 return MinObjAlignment - 1;
1669 1682 }
1670 1683
1671 1684 // how many heap words does a single bitmap word correspond to?
1672 1685 static size_t bitmap_word_covers_words() {
1673 1686 return BitsPerWord << shifter();
1674 1687 }
1675 1688
1676 1689 size_t gclab_word_size() const {
1677 1690 return _gclab_word_size;
1678 1691 }
1679 1692
1680 1693 // Calculates actual GCLab size in words
1681 1694 size_t gclab_real_word_size() const {
1682 1695 return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
1683 1696 / BitsPerWord;
1684 1697 }
1685 1698
1686 1699 static size_t bitmap_size_in_bits(size_t gclab_word_size) {
1687 1700 size_t bits_in_bitmap = gclab_word_size >> shifter();
1688 1701 // We are going to ensure that the beginning of a word in this
1689 1702 // bitmap also corresponds to the beginning of a word in the
1690 1703 // global marking bitmap. To handle the case where a GCLab
1691 1704 // starts from the middle of the bitmap, we need to add enough
1692 1705 // space (i.e. up to a bitmap word) to ensure that we have
1693 1706 // enough bits in the bitmap.
1694 1707 return bits_in_bitmap + BitsPerWord - 1;
1695 1708 }
1709 +
1696 1710 public:
1697 1711 GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
1698 1712 : BitMap(bitmap_size_in_bits(gclab_word_size)),
1699 1713 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1700 1714 _shifter(shifter()),
1701 1715 _bitmap_word_covers_words(bitmap_word_covers_words()),
1702 1716 _heap_start(heap_start),
1703 1717 _gclab_word_size(gclab_word_size),
1704 1718 _real_start_word(NULL),
1705 1719 _real_end_word(NULL),
1706 - _start_word(NULL)
1720 + _start_word(NULL),
1721 + _marked_bytes(0),
1722 + _card_bm((((gclab_word_size * BytesPerWord) + CardTableModRefBS::card_size - 1) >>
1723 + CardTableModRefBS::card_shift) + 1,
1724 + false /* in_resource_area */),
1725 + _bottom_card_num(0)
1707 1726 {
1708 1727 guarantee( size_in_words() >= bitmap_size_in_words(),
1709 1728 "just making sure");
1710 1729 }
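
For reference, the _card_bm sizing expression in the constructor above works out as in the sketch below. The 512-byte card size (card_shift == 9), 8-byte heap words, and the LAB size are assumptions made for the example, not values taken from this change.

    #include <cstdio>
    #include <cstddef>

    int main() {
      // Assumed platform constants (64-bit heap words, default 512-byte cards).
      const size_t BytesPerWord = 8;
      const size_t card_size    = 512;
      const size_t card_shift   = 9;      // log2(card_size)

      // Same shape as the _card_bm constructor argument: round the LAB size up
      // to whole cards, then add one card in case the LAB does not start on a
      // card boundary.
      size_t gclab_word_size = 4096;      // illustrative LAB size in words
      size_t lab_bytes = gclab_word_size * BytesPerWord;                     // 32768
      size_t card_bits = ((lab_bytes + card_size - 1) >> card_shift) + 1;    // 64 + 1
      std::printf("%zu-word LAB needs a %zu-bit card bitmap\n",
                  gclab_word_size, card_bits);
      return 0;
    }
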
1711 1730
1712 1731 inline unsigned heapWordToOffset(HeapWord* addr) {
1713 1732 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1714 1733 assert(offset < size(), "offset should be within bounds");
1715 1734 return offset;
1716 1735 }
1717 1736
1718 1737 inline HeapWord* offsetToHeapWord(size_t offset) {
1719 1738 HeapWord* addr = _start_word + (offset << _shifter);
1720 1739 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1721 1740 return addr;
1722 1741 }
1723 1742
1724 1743 bool fields_well_formed() {
1725 1744 bool ret1 = (_real_start_word == NULL) &&
1726 1745 (_real_end_word == NULL) &&
1727 1746 (_start_word == NULL);
1728 1747 if (ret1)
1729 1748 return true;
1730 1749
1731 1750 bool ret2 = _real_start_word >= _start_word &&
1732 1751 _start_word < _real_end_word &&
1733 1752 (_real_start_word + _gclab_word_size) == _real_end_word &&
1734 1753 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1735 1754 > _real_end_word;
1736 1755 return ret2;
1737 1756 }
1738 1757
1739 - inline bool mark(HeapWord* addr) {
1758 + inline bool mark(HeapWord* addr, size_t obj_size) {
1740 1759 guarantee(use_local_bitmaps, "invariant");
1741 1760 assert(fields_well_formed(), "invariant");
1742 1761
1743 1762 if (addr >= _real_start_word && addr < _real_end_word) {
1744 1763 assert(!isMarked(addr), "should not have already been marked");
1745 1764
1746 1765 // first mark it on the bitmap
1747 1766 at_put(heapWordToOffset(addr), true);
1748 1767
1768 + MemRegion mr(addr, obj_size);
1769 +
1770 + // Add to the marked_bytes total
1771 + _marked_bytes += mr.byte_size();
1772 +
1773 + // Set the bits associated with the cards spanned by the
1774 + // object
1775 + intptr_t start_card_num =
1776 + intptr_t(uintptr_t(mr.start()) >> CardTableModRefBS::card_shift);
1777 + intptr_t last_card_num =
1778 + intptr_t(uintptr_t(mr.last()) >> CardTableModRefBS::card_shift);
1779 +
1780 + BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
1781 + BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
1782 +
1783 + _card_bm.set_range(start_idx, last_idx);
1784 + // set_range is exclusive so set the bit for last_idx
1785 + _card_bm.set_bit(last_idx);
1786 +
1749 1787 return true;
1750 1788 } else {
1751 1789 return false;
1752 1790 }
1753 1791 }
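
The card-index arithmetic in mark() can be checked in isolation. The sketch below uses an assumed 512-byte card size and made-up addresses to show how the first and last words of a marked object are rebased against the LAB's bottom card.

    #include <cstdio>
    #include <cstdint>

    int main() {
      const unsigned card_shift = 9;       // assumed 512-byte cards

      // Made-up byte addresses for a LAB and an object copied into it.
      uintptr_t lab_start = 0x20000;       // LAB bottom
      uintptr_t obj_start = 0x203f8;       // object copied into the LAB
      size_t    obj_bytes = 48;
      uintptr_t obj_last  = obj_start + obj_bytes - 1;

      // Same arithmetic as mark(): card numbers of the LAB bottom and of the
      // object's first and last words, then rebased into bitmap indices.
      intptr_t bottom_card_num = (intptr_t)(lab_start >> card_shift);
      intptr_t start_card_num  = (intptr_t)(obj_start >> card_shift);
      intptr_t last_card_num   = (intptr_t)(obj_last  >> card_shift);

      size_t start_idx = (size_t)(start_card_num - bottom_card_num);
      size_t last_idx  = (size_t)(last_card_num  - bottom_card_num);

      // set_range() excludes its end index, which is why mark() sets the bit
      // for last_idx separately; here the object spans bits 1 and 2.
      std::printf("object spans card bits [%zu, %zu]\n", start_idx, last_idx);
      return 0;
    }
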
1754 1792
1755 1793 inline bool isMarked(HeapWord* addr) {
1756 1794 guarantee(use_local_bitmaps, "invariant");
1757 1795 assert(fields_well_formed(), "invariant");
1758 1796
1759 1797 return at(heapWordToOffset(addr));
1760 1798 }
1761 1799
1762 1800 void set_buffer(HeapWord* start) {
1763 1801 guarantee(use_local_bitmaps, "invariant");
1764 1802 clear();
1765 1803
1766 1804 assert(start != NULL, "invariant");
1767 1805 _real_start_word = start;
1768 1806 _real_end_word = start + _gclab_word_size;
1769 1807
1770 1808 size_t diff =
1771 1809 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1772 1810 _start_word = start - diff;
1773 1811
1812 + // Initialize _bottom_card_num for the card bitmap
1813 + _bottom_card_num =
1814 + intptr_t(uintptr_t(_real_start_word) >> CardTableModRefBS::card_shift);
1815 +
1816 + _marked_bytes = 0;
1817 + _card_bm.clear();
1818 +
1774 1819 assert(fields_well_formed(), "invariant");
1775 1820 }
1776 1821
1777 1822 #ifndef PRODUCT
1778 1823 void verify() {
1779 1824 // verify that the marks have been propagated
1780 1825 GCLabBitMapClosure cl(_cm, this);
1781 1826 iterate(&cl);
1782 1827 }
1783 1828 #endif // PRODUCT
1784 1829
1785 - void retire() {
1830 + void retire(int worker_i) {
1786 1831 guarantee(use_local_bitmaps, "invariant");
1787 1832 assert(fields_well_formed(), "invariant");
1788 1833
1789 1834 if (_start_word != NULL) {
1790 1835 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1791 1836
1792 1837 // this means that the bitmap was set up for the GCLab
1793 1838 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1794 1839
1795 1840 mark_bitmap->mostly_disjoint_range_union(this,
1796 1841 0, // always start from the start of the bitmap
1797 1842 _start_word,
1798 1843 gclab_real_word_size());
1799 - _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1844 +
1845 + // Note that not all objects copied into the LAB will have a bit set
1846 + // in the LAB bitmap (the LAB bitmap is used to propagate marks).
1847 + // So we can't just add the entire lab and its bitmap to the count
1848 + // data. The marking information has been recorded in _marked_bytes
1849 + // and _card_bm.
1850 + MemRegion lab_region(_real_start_word, _real_end_word);
1851 + _cm->add_to_count_data_for_region(lab_region,
1852 + &_card_bm,
1853 + _bottom_card_num,
1854 + _marked_bytes,
1855 + worker_i);
1856 +
1857 + _cm->grayRegionIfNecessary(lab_region);
1800 1858
1801 1859 #ifndef PRODUCT
1802 1860 if (use_local_bitmaps && verify_local_bitmaps)
1803 1861 verify();
1804 1862 #endif // PRODUCT
1805 1863 } else {
1806 1864 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1807 1865 }
1808 1866 }
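
retire() is where the per-LAB totals gathered by mark() are handed to the marking code. The sketch below models that accumulate-then-flush flow in plain standard C++; CountData and Lab are illustrative stand-ins, not the ConcurrentMark structures this change updates through add_to_count_data_for_region().

    #include <algorithm>
    #include <cstdio>
    #include <cstddef>
    #include <vector>

    // Illustrative per-worker counting data.
    struct CountData {
      size_t marked_bytes;
      std::vector<bool> cards;            // one bit per card
    };

    // Illustrative LAB-local totals, accumulated as objects are copied and
    // flushed exactly once when the buffer is retired.
    struct Lab {
      size_t marked_bytes;
      std::vector<bool> card_bm;

      explicit Lab(size_t num_cards) : marked_bytes(0), card_bm(num_cards, false) {}

      void record_copy(size_t obj_bytes, size_t first_card, size_t last_card) {
        marked_bytes += obj_bytes;
        for (size_t c = first_card; c <= last_card; c++) { card_bm[c] = true; }
      }

      void retire_into(CountData& worker_data, size_t bottom_card) {
        worker_data.marked_bytes += marked_bytes;
        for (size_t c = 0; c < card_bm.size(); c++) {
          if (card_bm[c]) { worker_data.cards[bottom_card + c] = true; }
        }
        marked_bytes = 0;
        std::fill(card_bm.begin(), card_bm.end(), false);
      }
    };

    int main() {
      CountData worker = { 0, std::vector<bool>(128, false) };
      Lab lab(9);                         // 8 cards plus 1 for an unaligned start
      lab.record_copy(48, 1, 2);
      lab.record_copy(24, 3, 3);
      lab.retire_into(worker, 16);        // flush into the worker's count data
      std::printf("worker has %zu marked bytes\n", worker.marked_bytes);
      return 0;
    }
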
1809 1867
1810 1868 size_t bitmap_size_in_words() const {
1811 1869 return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
1812 1870 }
1813 1871
1814 1872 };
1815 1873
1816 1874 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1817 1875 private:
1818 1876 bool _retired;
1819 1877 bool _should_mark_objects;
1820 1878 GCLabBitMap _bitmap;
1879 + int _worker_i;
1821 1880
1822 1881 public:
1823 - G1ParGCAllocBuffer(size_t gclab_word_size);
1882 + G1ParGCAllocBuffer(size_t gclab_word_size, int worker_i);
1824 1883
1825 - inline bool mark(HeapWord* addr) {
1884 + inline bool mark(HeapWord* addr, size_t obj_size) {
1826 1885 guarantee(use_local_bitmaps, "invariant");
1827 1886 assert(_should_mark_objects, "invariant");
1828 - return _bitmap.mark(addr);
1887 + return _bitmap.mark(addr, obj_size);
1829 1888 }
1830 1889
1831 1890 inline void set_buf(HeapWord* buf) {
1832 1891 if (use_local_bitmaps && _should_mark_objects) {
1833 1892 _bitmap.set_buffer(buf);
1834 1893 }
1835 1894 ParGCAllocBuffer::set_buf(buf);
1836 1895 _retired = false;
1837 1896 }
1838 1897
1839 1898 inline void retire(bool end_of_gc, bool retain) {
1840 1899 if (_retired)
1841 1900 return;
1842 1901 if (use_local_bitmaps && _should_mark_objects) {
1843 - _bitmap.retire();
1902 + _bitmap.retire(_worker_i);
1844 1903 }
1845 1904 ParGCAllocBuffer::retire(end_of_gc, retain);
1846 1905 _retired = true;
1847 1906 }
1848 1907 };
1849 1908
1850 1909 class G1ParScanThreadState : public StackObj {
1851 1910 protected:
1852 1911 G1CollectedHeap* _g1h;
1853 1912 RefToScanQueue* _refs;
1854 1913 DirtyCardQueue _dcq;
1855 1914 CardTableModRefBS* _ct_bs;
1856 1915 G1RemSet* _g1_rem;
1857 1916
1858 1917 G1ParGCAllocBuffer _surviving_alloc_buffer;
1859 1918 G1ParGCAllocBuffer _tenured_alloc_buffer;
1860 1919 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1861 1920 ageTable _age_table;
1862 1921
1863 1922 size_t _alloc_buffer_waste;
1864 1923 size_t _undo_waste;
1865 1924
1866 1925 OopsInHeapRegionClosure* _evac_failure_cl;
1867 1926 G1ParScanHeapEvacClosure* _evac_cl;
1868 1927 G1ParScanPartialArrayClosure* _partial_scan_cl;
1869 1928
1870 1929 int _hash_seed;
1871 1930 int _queue_num;
1872 1931
1873 1932 size_t _term_attempts;
1874 1933
1875 1934 double _start;
1876 1935 double _start_strong_roots;
1877 1936 double _strong_roots_time;
1878 1937 double _start_term;
1879 1938 double _term_time;
1880 1939
1881 1940 // Map from young-age-index (0 == not young, 1 is youngest) to
1882 1941 // surviving words. base is what we get back from the malloc call
1883 1942 size_t* _surviving_young_words_base;
1884 1943 // this points into the array, as we use the first few entries for padding
1885 1944 size_t* _surviving_young_words;
1886 1945
1887 1946 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1888 1947
1889 1948 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1890 1949
1891 1950 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1892 1951
1893 1952 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1894 1953 CardTableModRefBS* ctbs() { return _ct_bs; }
1895 1954
1896 1955 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1897 1956 if (!from->is_survivor()) {
1898 1957 _g1_rem->par_write_ref(from, p, tid);
1899 1958 }
1900 1959 }
1901 1960
1902 1961 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1903 1962 // If the new value of the field points to the same region or
1904 1963 // is the to-space, we don't need to include it in the Rset updates.
1905 1964 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1906 1965 size_t card_index = ctbs()->index_for(p);
1907 1966 // If the card hasn't been added to the buffer, do it.
1908 1967 if (ctbs()->mark_card_deferred(card_index)) {
1909 1968 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1910 1969 }
1911 1970 }
1912 1971 }
1913 1972
1914 1973 public:
1915 1974 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
1916 1975
1917 1976 ~G1ParScanThreadState() {
1918 1977 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1919 1978 }
1920 1979
1921 1980 RefToScanQueue* refs() { return _refs; }
1922 1981 ageTable* age_table() { return &_age_table; }
1923 1982
1924 1983 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1925 1984 return _alloc_buffers[purpose];
1926 1985 }
1927 1986
1928 1987 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1929 1988 size_t undo_waste() const { return _undo_waste; }
1930 1989
1931 1990 #ifdef ASSERT
1932 1991 bool verify_ref(narrowOop* ref) const;
1933 1992 bool verify_ref(oop* ref) const;
1934 1993 bool verify_task(StarTask ref) const;
1935 1994 #endif // ASSERT
1936 1995
1937 1996 template <class T> void push_on_queue(T* ref) {
1938 1997 assert(verify_ref(ref), "sanity");
1939 1998 refs()->push(ref);
1940 1999 }
1941 2000
1942 2001 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1943 2002 if (G1DeferredRSUpdate) {
1944 2003 deferred_rs_update(from, p, tid);
1945 2004 } else {
1946 2005 immediate_rs_update(from, p, tid);
1947 2006 }
1948 2007 }
1949 2008
1950 2009 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1951 2010
1952 2011 HeapWord* obj = NULL;
1953 2012 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1954 2013 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1955 2014 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1956 2015 assert(gclab_word_size == alloc_buf->word_sz(),
1957 2016 "dynamic resizing is not supported");
1958 2017 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1959 2018 alloc_buf->retire(false, false);
1960 2019
1961 2020 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1962 2021 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1963 2022 // Otherwise.
1964 2023 alloc_buf->set_buf(buf);
1965 2024
1966 2025 obj = alloc_buf->allocate(word_sz);
1967 2026 assert(obj != NULL, "buffer was definitely big enough...");
1968 2027 } else {
1969 2028 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1970 2029 }
1971 2030 return obj;
1972 2031 }
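
The waste test at the top of allocate_slow() decides between refilling the PLAB and allocating directly from the shared space. The sketch below evaluates it for two request sizes; the buffer size and the ParallelGCBufferWastePct value are assumed for illustration.

    #include <cstdio>
    #include <cstddef>

    // True when allocate_slow() would retire the current buffer and allocate a
    // fresh one, i.e. the request is small relative to the buffer so wasting
    // what is left of the old buffer is acceptable.
    static bool refill_plab(size_t word_sz, size_t gclab_word_size, size_t waste_pct) {
      return word_sz * 100 < gclab_word_size * waste_pct;
    }

    int main() {
      const size_t gclab_word_size = 4096;  // illustrative PLAB size in words
      const size_t waste_pct       = 10;    // assumed ParallelGCBufferWastePct

      std::printf("256-word request:  %s\n",
                  refill_plab(256, gclab_word_size, waste_pct) ? "refill PLAB"
                                                               : "allocate directly");
      std::printf("1024-word request: %s\n",
                  refill_plab(1024, gclab_word_size, waste_pct) ? "refill PLAB"
                                                                : "allocate directly");
      return 0;
    }
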
1973 2032
1974 2033 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1975 2034 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1976 2035 if (obj != NULL) return obj;
1977 2036 return allocate_slow(purpose, word_sz);
1978 2037 }
1979 2038
1980 2039 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1981 2040 if (alloc_buffer(purpose)->contains(obj)) {
1982 2041 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1983 2042 "should contain whole object");
1984 2043 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1985 2044 } else {
1986 2045 CollectedHeap::fill_with_object(obj, word_sz);
1987 2046 add_to_undo_waste(word_sz);
1988 2047 }
1989 2048 }
1990 2049
1991 2050 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1992 2051 _evac_failure_cl = evac_failure_cl;
1993 2052 }
1994 2053 OopsInHeapRegionClosure* evac_failure_closure() {
1995 2054 return _evac_failure_cl;
1996 2055 }
1997 2056
1998 2057 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1999 2058 _evac_cl = evac_cl;
2000 2059 }
2001 2060
2002 2061 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
2003 2062 _partial_scan_cl = partial_scan_cl;
2004 2063 }
2005 2064
2006 2065 int* hash_seed() { return &_hash_seed; }
2007 2066 int queue_num() { return _queue_num; }
2008 2067
2009 2068 size_t term_attempts() const { return _term_attempts; }
2010 2069 void note_term_attempt() { _term_attempts++; }
2011 2070
2012 2071 void start_strong_roots() {
2013 2072 _start_strong_roots = os::elapsedTime();
2014 2073 }
2015 2074 void end_strong_roots() {
2016 2075 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
2017 2076 }
2018 2077 double strong_roots_time() const { return _strong_roots_time; }
2019 2078
2020 2079 void start_term_time() {
2021 2080 note_term_attempt();
2022 2081 _start_term = os::elapsedTime();
2023 2082 }
2024 2083 void end_term_time() {
2025 2084 _term_time += (os::elapsedTime() - _start_term);
2026 2085 }
2027 2086 double term_time() const { return _term_time; }
2028 2087
2029 2088 double elapsed_time() const {
2030 2089 return os::elapsedTime() - _start;
2031 2090 }
2032 2091
2033 2092 static void
2034 2093 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
2035 2094 void
2036 2095 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
2037 2096
2038 2097 size_t* surviving_young_words() {
2039 2098 // We add on to hide entry 0 which accumulates surviving words for
2040 2099 // age -1 regions (i.e. non-young ones)
2041 2100 return _surviving_young_words;
2042 2101 }
2043 2102
2044 2103 void retire_alloc_buffers() {
2045 2104 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2046 2105 size_t waste = _alloc_buffers[ap]->words_remaining();
2047 2106 add_to_alloc_buffer_waste(waste);
2048 2107 _alloc_buffers[ap]->retire(true, false);
2049 2108 }
2050 2109 }
2051 2110
2052 2111 template <class T> void deal_with_reference(T* ref_to_scan) {
2053 2112 if (has_partial_array_mask(ref_to_scan)) {
2054 2113 _partial_scan_cl->do_oop_nv(ref_to_scan);
2055 2114 } else {
2056 2115 // Note: we can use "raw" versions of "region_containing" because
2057 2116 // "obj_to_scan" is definitely in the heap, and is not in a
2058 2117 // humongous region.
2059 2118 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2060 2119 _evac_cl->set_region(r);
2061 2120 _evac_cl->do_oop_nv(ref_to_scan);
2062 2121 }
2063 2122 }
2064 2123
2065 2124 void deal_with_reference(StarTask ref) {
2066 2125 assert(verify_task(ref), "sanity");
2067 2126 if (ref.is_narrow()) {
2068 2127 deal_with_reference((narrowOop*)ref);
2069 2128 } else {
2070 2129 deal_with_reference((oop*)ref);
2071 2130 }
2072 2131 }
2073 2132
2074 2133 public:
2075 2134 void trim_queue();
2076 2135 };
2077 2136
2078 2137 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP