G1: Use SoftMaxHeapSize to guide GC heuristics
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
26 26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
27 27
28 28 #include "gc/g1/g1BarrierSet.hpp"
29 29 #include "gc/g1/g1BiasedArray.hpp"
30 30 #include "gc/g1/g1CardTable.hpp"
31 31 #include "gc/g1/g1CollectionSet.hpp"
32 32 #include "gc/g1/g1CollectorState.hpp"
33 33 #include "gc/g1/g1ConcurrentMark.hpp"
34 34 #include "gc/g1/g1EdenRegions.hpp"
35 35 #include "gc/g1/g1EvacFailure.hpp"
36 36 #include "gc/g1/g1EvacStats.hpp"
37 37 #include "gc/g1/g1EvacuationInfo.hpp"
38 38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 39 #include "gc/g1/g1HeapTransition.hpp"
40 40 #include "gc/g1/g1HeapVerifier.hpp"
41 41 #include "gc/g1/g1HRPrinter.hpp"
42 42 #include "gc/g1/g1HeapRegionAttr.hpp"
43 43 #include "gc/g1/g1MonitoringSupport.hpp"
44 44 #include "gc/g1/g1NUMA.hpp"
45 45 #include "gc/g1/g1RedirtyCardsQueue.hpp"
46 46 #include "gc/g1/g1SurvivorRegions.hpp"
47 47 #include "gc/g1/g1YCTypes.hpp"
48 48 #include "gc/g1/heapRegionManager.hpp"
49 49 #include "gc/g1/heapRegionSet.hpp"
50 50 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
51 51 #include "gc/shared/barrierSet.hpp"
52 52 #include "gc/shared/collectedHeap.hpp"
53 53 #include "gc/shared/gcHeapSummary.hpp"
54 54 #include "gc/shared/plab.hpp"
55 55 #include "gc/shared/preservedMarks.hpp"
56 56 #include "gc/shared/softRefPolicy.hpp"
57 57 #include "memory/memRegion.hpp"
58 58 #include "utilities/stack.hpp"
59 59
60 60 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
61 61 // It uses the "Garbage First" heap organization and algorithm, which
62 62 // may combine concurrent marking with parallel, incremental compaction of
63 63 // heap subsets that will yield large amounts of garbage.
64 64
65 65 // Forward declarations
66 66 class HeapRegion;
67 67 class GenerationSpec;
68 68 class G1ParScanThreadState;
69 69 class G1ParScanThreadStateSet;
70 70 class G1ParScanThreadState;
71 71 class MemoryPool;
72 72 class MemoryManager;
73 73 class ObjectClosure;
74 74 class SpaceClosure;
75 75 class CompactibleSpaceClosure;
76 76 class Space;
77 77 class G1CardTableEntryClosure;
78 78 class G1CollectionSet;
79 79 class G1Policy;
80 80 class G1HotCardCache;
81 81 class G1RemSet;
82 82 class G1YoungRemSetSamplingThread;
83 83 class G1ConcurrentMark;
84 84 class G1ConcurrentMarkThread;
85 85 class G1ConcurrentRefine;
86 86 class GenerationCounters;
87 87 class STWGCTimer;
88 88 class G1NewTracer;
89 89 class EvacuationFailedInfo;
90 90 class nmethod;
91 91 class WorkGang;
92 92 class G1Allocator;
93 93 class G1ArchiveAllocator;
94 94 class G1FullGCScope;
95 95 class G1HeapVerifier;
96 96 class G1HeapSizingPolicy;
97 97 class G1HeapSummary;
98 98 class G1EvacSummary;
99 99
100 100 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
101 101 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
102 102
103 103 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
104 104 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
105 105
106 106 // The G1 STW is alive closure.
107 107 // An instance is embedded into the G1CH and used as the
108 108 // (optional) _is_alive_non_header closure in the STW
109 109 // reference processor. It is also extensively used during
110 110 // reference processing during STW evacuation pauses.
111 111 class G1STWIsAliveClosure : public BoolObjectClosure {
112 112 G1CollectedHeap* _g1h;
113 113 public:
114 114 G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
115 115 bool do_object_b(oop p);
116 116 };
117 117
118 118 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
119 119 G1CollectedHeap* _g1h;
120 120 public:
121 121 G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
122 122 bool do_object_b(oop p);
123 123 };
124 124
125 125 class G1RegionMappingChangedListener : public G1MappingChangedListener {
126 126 private:
127 127 void reset_from_card_cache(uint start_idx, size_t num_regions);
128 128 public:
129 129 virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
130 130 };
131 131
132 132 class G1CollectedHeap : public CollectedHeap {
133 133 friend class VM_CollectForMetadataAllocation;
134 134 friend class VM_G1CollectForAllocation;
135 135 friend class VM_G1CollectFull;
136 136 friend class VM_G1TryInitiateConcMark;
137 137 friend class VMStructs;
138 138 friend class MutatorAllocRegion;
139 139 friend class G1FullCollector;
140 140 friend class G1GCAllocRegion;
141 141 friend class G1HeapVerifier;
142 142
143 143 // Closures used in implementation.
144 144 friend class G1ParScanThreadState;
145 145 friend class G1ParScanThreadStateSet;
146 146 friend class G1EvacuateRegionsTask;
147 147 friend class G1PLABAllocator;
148 148
149 149 // Other related classes.
150 150 friend class HeapRegionClaimer;
151 151
152 152 // Testing classes.
153 153 friend class G1CheckRegionAttrTableClosure;
154 154
155 155 private:
156 156 G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
157 157
158 158 WorkGang* _workers;
159 159 G1CardTable* _card_table;
160 160
161 161 SoftRefPolicy _soft_ref_policy;
162 162
163 163 static size_t _humongous_object_threshold_in_words;
164 164
165 165 // These sets keep track of old, archive and humongous regions respectively.
166 166 HeapRegionSet _old_set;
167 167 HeapRegionSet _archive_set;
168 168 HeapRegionSet _humongous_set;
169 169
170 170 void eagerly_reclaim_humongous_regions();
171 171 // Start a new incremental collection set for the next pause.
172 172 void start_new_collection_set();
173 173
174 174 // The block offset table for the G1 heap.
175 175 G1BlockOffsetTable* _bot;
176 176
177 177 // Tears down the region sets / lists so that they are empty and the
178 178 // regions on the heap do not belong to a region set / list. The
179 179 // only exception is the humongous set which we leave unaltered. If
180 180 // free_list_only is true, it will only tear down the master free
181 181 // list. It is called before a Full GC (free_list_only == false) or
182 182 // before heap shrinking (free_list_only == true).
183 183 void tear_down_region_sets(bool free_list_only);
184 184
185 185 // Rebuilds the region sets / lists so that they are repopulated to
186 186 // reflect the contents of the heap. The only exception is the
187 187 // humongous set which was not torn down in the first place. If
188 188 // free_list_only is true, it will only rebuild the master free
189 189 // list. It is called after a Full GC (free_list_only == false) or
190 190 // after heap shrinking (free_list_only == true).
191 191 void rebuild_region_sets(bool free_list_only);
192 192
193 193 // Callback for region mapping changed events.
194 194 G1RegionMappingChangedListener _listener;
195 195
196 196 // Handle G1 NUMA support.
197 197 G1NUMA* _numa;
198 198
199 199 // The sequence of all heap regions in the heap.
200 200 HeapRegionManager* _hrm;
201 201
202 202 // Manages all allocations with regions except humongous object allocations.
203 203 G1Allocator* _allocator;
204 204
205 205 // Manages all heap verification.
206 206 G1HeapVerifier* _verifier;
207 207
208 208 // Outside of GC pauses, the number of bytes used in all regions other
209 209 // than the current allocation region(s).
210 210 volatile size_t _summary_bytes_used;
211 211
212 212 void increase_used(size_t bytes);
213 213 void decrease_used(size_t bytes);
214 214
215 215 void set_used(size_t bytes);
216 216
217 217 // Number of bytes used in all regions during GC. Typically changed when
218 218 // retiring a GC alloc region.
219 219 size_t _bytes_used_during_gc;
220 220
221 221 // Class that handles archive allocation ranges.
222 222 G1ArchiveAllocator* _archive_allocator;
223 223
224 224 // GC allocation statistics policy for survivors.
225 225 G1EvacStats _survivor_evac_stats;
226 226
227 227 // GC allocation statistics policy for tenured objects.
228 228 G1EvacStats _old_evac_stats;
229 229
230 230 // It specifies whether we should attempt to expand the heap after a
231 231 // region allocation failure. If heap expansion fails we set this to
232 232 // false so that we don't re-attempt the heap expansion (it's likely
233 233 // that subsequent expansion attempts will also fail if one fails).
234 234 // Currently, it is only consulted during GC and it's reset at the
235 235 // start of each GC.
236 236 bool _expand_heap_after_alloc_failure;
237 237
238 238 // Helper for monitoring and management support.
239 239 G1MonitoringSupport* _g1mm;
240 240
241 241 // Records whether the region at the given index is (still) a
242 242 // candidate for eager reclaim. Only valid for humongous start
243 243 // regions; other regions have unspecified values. Humongous start
244 244 // regions are initialized at start of collection pause, with
245 245 // candidates removed from the set as they are found reachable from
246 246 // roots or the young generation.
247 247 class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
248 248 protected:
249 249 bool default_value() const { return false; }
250 250 public:
251 251 void clear() { G1BiasedMappedArray<bool>::clear(); }
252 252 void set_candidate(uint region, bool value) {
253 253 set_by_index(region, value);
254 254 }
255 255 bool is_candidate(uint region) {
256 256 return get_by_index(region);
257 257 }
258 258 };
259 259
260 260 HumongousReclaimCandidates _humongous_reclaim_candidates;
261 261 // Stores whether during humongous object registration we found candidate regions.
262 262 // If not, we can skip a few steps.
263 263 bool _has_humongous_reclaim_candidates;
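// [Editor's note] Illustrative sketch of how the reclaim-candidate state above is
// typically used, based only on the comments and declarations in this header
// (the real code lives in g1CollectedHeap.cpp and its inline file; region_idx
// and obj are placeholder variables):
//
//   // At the start of a pause, each humongous-start region that may be
//   // eagerly reclaimable is registered:
//   set_humongous_reclaim_candidate(region_idx, true);
//   set_has_humongous_reclaim_candidate(true);
//
//   // When a root or young-gen reference into the object is later found,
//   // the region is removed from the candidate set:
//   set_humongous_is_live(obj);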
264 264
265 265 G1HRPrinter _hr_printer;
266 266
267 267 // Return true if an explicit GC should start a concurrent cycle instead
268 268 // of doing a STW full GC. A concurrent cycle should be started if:
269 269 // (a) cause == _g1_humongous_allocation,
270 270 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
271 271 // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
272 272 // (d) cause == _wb_conc_mark,
273 273 // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
274 274 bool should_do_concurrent_full_gc(GCCause::Cause cause);
275 275
276 276 // Attempt to start a concurrent cycle with the indicated cause.
277 277 // precondition: should_do_concurrent_full_gc(cause)
278 278 bool try_collect_concurrently(GCCause::Cause cause,
279 279 uint gc_counter,
280 280 uint old_marking_started_before);
281 281
282 282 // Return true if should upgrade to full gc after an incremental one.
283 283 bool should_upgrade_to_full_gc(GCCause::Cause cause);
284 284
285 285 // indicates whether we are in young or mixed GC mode
286 286 G1CollectorState _collector_state;
287 287
288 288 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
289 289 // concurrent cycles) we have started.
290 290 volatile uint _old_marking_cycles_started;
291 291
292 292 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
293 293 // concurrent cycles) we have completed.
294 294 volatile uint _old_marking_cycles_completed;
295 295
296 296 // This is a non-product method that is helpful for testing. It is
297 297 // called at the end of a GC and artificially expands the heap by
298 298 // allocating a number of dead regions. This way we can induce very
299 299 // frequent marking cycles and stress the cleanup / concurrent
300 300 // cleanup code more (as all the regions that will be allocated by
301 301 // this method will be found dead by the marking cycle).
302 302 void allocate_dummy_regions() PRODUCT_RETURN;
303 303
304 304 // If the HR printer is active, dump the state of the regions in the
305 305 // heap after a compaction.
306 306 void print_hrm_post_compaction();
307 307
308 308 // Create a memory mapper for auxiliary data structures of the given size and
309 309 // translation factor.
310 310 static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
311 311 size_t size,
312 312 size_t translation_factor);
313 313
314 314 void trace_heap(GCWhen::Type when, const GCTracer* tracer);
315 315
316 316 // These are macros so that, if the assert fires, we get the correct
317 317 // line number, file, etc.
318 318
319 319 #define heap_locking_asserts_params(_extra_message_) \
320 320 "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
321 321 (_extra_message_), \
322 322 BOOL_TO_STR(Heap_lock->owned_by_self()), \
323 323 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
324 324 BOOL_TO_STR(Thread::current()->is_VM_thread())
325 325
326 326 #define assert_heap_locked() \
327 327 do { \
328 328 assert(Heap_lock->owned_by_self(), \
329 329 heap_locking_asserts_params("should be holding the Heap_lock")); \
330 330 } while (0)
331 331
332 332 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
333 333 do { \
334 334 assert(Heap_lock->owned_by_self() || \
335 335 (SafepointSynchronize::is_at_safepoint() && \
336 336 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
337 337 heap_locking_asserts_params("should be holding the Heap_lock or " \
338 338 "should be at a safepoint")); \
339 339 } while (0)
340 340
341 341 #define assert_heap_locked_and_not_at_safepoint() \
342 342 do { \
343 343 assert(Heap_lock->owned_by_self() && \
344 344 !SafepointSynchronize::is_at_safepoint(), \
345 345 heap_locking_asserts_params("should be holding the Heap_lock and " \
346 346 "should not be at a safepoint")); \
347 347 } while (0)
348 348
349 349 #define assert_heap_not_locked() \
350 350 do { \
351 351 assert(!Heap_lock->owned_by_self(), \
352 352 heap_locking_asserts_params("should not be holding the Heap_lock")); \
353 353 } while (0)
354 354
355 355 #define assert_heap_not_locked_and_not_at_safepoint() \
356 356 do { \
357 357 assert(!Heap_lock->owned_by_self() && \
358 358 !SafepointSynchronize::is_at_safepoint(), \
359 359 heap_locking_asserts_params("should not be holding the Heap_lock and " \
360 360 "should not be at a safepoint")); \
361 361 } while (0)
362 362
363 363 #define assert_at_safepoint_on_vm_thread() \
364 364 do { \
365 365 assert_at_safepoint(); \
366 366 assert(Thread::current_or_null() != NULL, "no current thread"); \
367 367 assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
368 368 } while (0)
369 369
370 370 #ifdef ASSERT
371 371 #define assert_used_and_recalculate_used_equal(g1h) \
372 372 do { \
373 373 size_t cur_used_bytes = g1h->used(); \
374 374 size_t recal_used_bytes = g1h->recalculate_used(); \
375 375 assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
376 376 " same as recalculated used(" SIZE_FORMAT ").", \
377 377 cur_used_bytes, recal_used_bytes); \
378 378 } while (0)
379 379 #else
380 380 #define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
381 381 #endif
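// [Editor's note] Illustrative use of the locking assert macros above. The
// function name below is hypothetical; the point is only that mutator-side
// paths state their locking expectations up front:
//
//   void G1CollectedHeap::some_mutator_side_allocation_path() {  // hypothetical
//     assert_heap_not_locked_and_not_at_safepoint();
//     // ... allocation work that must not run inside a safepoint ...
//   }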
382 382
383 383 const char* young_gc_name() const;
384 384
385 385 // The young region list.
386 386 G1EdenRegions _eden;
387 387 G1SurvivorRegions _survivor;
388 388
389 389 STWGCTimer* _gc_timer_stw;
390 390
391 391 G1NewTracer* _gc_tracer_stw;
392 392
393 393 // The current policy object for the collector.
394 394 G1Policy* _policy;
395 395 G1HeapSizingPolicy* _heap_sizing_policy;
396 396
397 397 G1CollectionSet _collection_set;
398 398
399 399 // Try to allocate a single non-humongous HeapRegion sufficient for
400 400 // an allocation of the given word_size. If do_expand is true,
401 401 // attempt to expand the heap if necessary to satisfy the allocation
402 402 // request. 'type' takes the type of region to be allocated. (Use constants
403 403 // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
404 404 HeapRegion* new_region(size_t word_size,
405 405 HeapRegionType type,
406 406 bool do_expand,
407 407 uint node_index = G1NUMA::AnyNodeIndex);
408 408
409 409 // Initialize a contiguous set of free regions of length num_regions
410 410 // and starting at index first so that they appear as a single
411 411 // humongous region.
412 412 HeapWord* humongous_obj_allocate_initialize_regions(uint first,
413 413 uint num_regions,
414 414 size_t word_size);
415 415
416 416 // Attempt to allocate a humongous object of the given size. Return
417 417 // NULL if unsuccessful.
418 418 HeapWord* humongous_obj_allocate(size_t word_size);
419 419
420 420 // The following two methods, allocate_new_tlab() and
421 421 // mem_allocate(), are the two main entry points from the runtime
422 422 // into the G1's allocation routines. They have the following
423 423 // assumptions:
424 424 //
425 425 // * They should both be called outside safepoints.
426 426 //
427 427 // * They should both be called without holding the Heap_lock.
428 428 //
429 429 // * All allocation requests for new TLABs should go to
430 430 // allocate_new_tlab().
431 431 //
432 432 // * All non-TLAB allocation requests should go to mem_allocate().
433 433 //
434 434 // * If either call cannot satisfy the allocation request using the
435 435 // current allocating region, they will try to get a new one. If
436 436 // this fails, they will attempt to do an evacuation pause and
437 437 // retry the allocation.
438 438 //
439 439 // * If all allocation attempts fail, even after trying to schedule
440 440 // an evacuation pause, allocate_new_tlab() will return NULL,
441 441 // whereas mem_allocate() will attempt a heap expansion and/or
442 442 // schedule a Full GC.
443 443 //
444 444 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
445 445 // should never be called with word_size being humongous. All
446 446 // humongous allocation requests should go to mem_allocate() which
447 447 // will satisfy them with a special path.
448 448
449 449 virtual HeapWord* allocate_new_tlab(size_t min_size,
450 450 size_t requested_size,
451 451 size_t* actual_size);
452 452
453 453 virtual HeapWord* mem_allocate(size_t word_size,
454 454 bool* gc_overhead_limit_was_exceeded);
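// [Editor's note] A minimal sketch of how mem_allocate() could dispatch
// according to the rules above (assumed shape only, not the definition in
// g1CollectedHeap.cpp): humongous requests take the special path, all other
// requests go through the first/second-level attempts declared below.
//
//   HeapWord* G1CollectedHeap::mem_allocate(size_t word_size, bool* /* unused */) {
//     if (is_humongous(word_size)) {
//       return attempt_allocation_humongous(word_size);
//     }
//     size_t dummy = 0;
//     return attempt_allocation(word_size, word_size, &dummy);
//   }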
455 455
456 456 // First-level mutator allocation attempt: try to allocate out of
457 457 // the mutator alloc region without taking the Heap_lock. This
458 458 // should only be used for non-humongous allocations.
459 459 inline HeapWord* attempt_allocation(size_t min_word_size,
460 460 size_t desired_word_size,
461 461 size_t* actual_word_size);
462 462
463 463 // Second-level mutator allocation attempt: take the Heap_lock and
464 464 // retry the allocation attempt, potentially scheduling a GC
465 465 // pause. This should only be used for non-humongous allocations.
466 466 HeapWord* attempt_allocation_slow(size_t word_size);
467 467
468 468 // Takes the Heap_lock and attempts a humongous allocation. It can
469 469 // potentially schedule a GC pause.
470 470 HeapWord* attempt_allocation_humongous(size_t word_size);
471 471
472 472 // Allocation attempt that should be called during safepoints (e.g.,
473 473 // at the end of a successful GC). expect_null_mutator_alloc_region
474 474 // specifies whether the mutator alloc region is expected to be NULL
475 475 // or not.
476 476 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
477 477 bool expect_null_mutator_alloc_region);
478 478
479 479 // These methods are the "callbacks" from the G1AllocRegion class.
480 480
481 481 // For mutator alloc regions.
482 482 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
483 483 void retire_mutator_alloc_region(HeapRegion* alloc_region,
484 484 size_t allocated_bytes);
485 485
486 486 // For GC alloc regions.
487 487 bool has_more_regions(G1HeapRegionAttr dest);
488 488 HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
489 489 void retire_gc_alloc_region(HeapRegion* alloc_region,
490 490 size_t allocated_bytes, G1HeapRegionAttr dest);
491 491
492 492 // - if explicit_gc is true, the GC is for a System.gc() etc,
493 493 // otherwise it's for a failed allocation.
494 494 // - if clear_all_soft_refs is true, all soft references should be
495 495 // cleared during the GC.
496 496 // - it returns false if it is unable to do the collection due to the
497 497 // GC locker being active, true otherwise.
498 498 bool do_full_collection(bool explicit_gc,
499 499 bool clear_all_soft_refs);
500 500
501 501 // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
502 502 virtual void do_full_collection(bool clear_all_soft_refs);
503 503
504 504 // Callback from VM_G1CollectForAllocation operation.
505 505 // This function does everything necessary/possible to satisfy a
506 506 // failed allocation request (including collection, expansion, etc.)
507 507 HeapWord* satisfy_failed_allocation(size_t word_size,
508 508 bool* succeeded);
509 509 // Internal helpers used during full GC to split it up to
510 510 // increase readability.
511 511 void abort_concurrent_cycle();
512 512 void verify_before_full_collection(bool explicit_gc);
513 513 void prepare_heap_for_full_collection();
514 514 void prepare_heap_for_mutators();
515 515 void abort_refinement();
516 516 void verify_after_full_collection();
517 517 void print_heap_after_full_collection(G1HeapTransition* heap_transition);
518 518
519 519 // Helper method for satisfy_failed_allocation()
520 520 HeapWord* satisfy_failed_allocation_helper(size_t word_size,
521 521 bool do_gc,
522 522 bool clear_all_soft_refs,
523 523 bool expect_null_mutator_alloc_region,
524 524 bool* gc_succeeded);
525 525
526 526 // Attempt to expand the heap sufficiently
527 527 // to support an allocation of the given "word_size". If
528 528 // successful, perform the allocation and return the address of the
529 529 // allocated block, or else "NULL".
530 530 HeapWord* expand_and_allocate(size_t word_size);
531 531
532 532 // Process any reference objects discovered.
533 533 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
534 534
535 535 // During an initial mark pause we may install a pending list head which is not
536 536 // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
537 537 // to discover.
538 538 void make_pending_list_reachable();
539 539
540 540 // Merges the information gathered on a per-thread basis for all worker threads
541 541 // during GC into global variables.
542 542 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
543 543
544 544 void verify_numa_regions(const char* desc);
545 545
546 546 public:
547 547 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
548 548
549 549 WorkGang* workers() const { return _workers; }
550 550
551 551 // Runs the given AbstractGangTask with the current active workers, returning the
552 552 // total time taken.
553 553 Tickspan run_task(AbstractGangTask* task);
554 554
555 555 G1Allocator* allocator() {
556 556 return _allocator;
557 557 }
558 558
559 559 G1HeapVerifier* verifier() {
560 560 return _verifier;
561 561 }
562 562
563 563 G1MonitoringSupport* g1mm() {
564 564 assert(_g1mm != NULL, "should have been initialized");
565 565 return _g1mm;
566 566 }
567 567
568 568 void resize_heap_if_necessary();
569 569
570 570 G1NUMA* numa() const { return _numa; }
571 571
572 572 // Expand the garbage-first heap by at least the given size (in bytes!).
573 573 // Returns true if the heap was expanded by the requested amount;
574 574 // false otherwise.
575 575 // (Rounds up to a HeapRegion boundary.)
576 576 bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
577 577 bool expand_single_region(uint node_index);
578 578
579 579 // Returns the PLAB statistics for a given destination.
580 580 inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
581 581
582 582 // Determines PLAB size for a given destination.
583 583 inline size_t desired_plab_sz(G1HeapRegionAttr dest);
584 584
585 585 // Do anything common to GC's.
586 586 void gc_prologue(bool full);
587 587 void gc_epilogue(bool full);
588 588
589 589 // Does the given region fulfill remembered set based eager reclaim candidate requirements?
590 590 bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
591 591
592 592 // Modify the reclaim candidate set and test for presence.
593 593 // These are only valid for starts_humongous regions.
594 594 inline void set_humongous_reclaim_candidate(uint region, bool value);
595 595 inline bool is_humongous_reclaim_candidate(uint region);
596 596 inline void set_has_humongous_reclaim_candidate(bool value);
597 597
598 598 // Remove from the reclaim candidate set. Also remove from the
599 599 // collection set so that later encounters avoid the slow path.
600 600 inline void set_humongous_is_live(oop obj);
601 601
602 602 // Register the given region to be part of the collection set.
603 603 inline void register_humongous_region_with_region_attr(uint index);
604 604
605 605 // We register a region with the fast "in collection set" test. We
606 606 // simply set to true the array slot corresponding to this region.
607 607 void register_young_region_with_region_attr(HeapRegion* r) {
608 608 _region_attr.set_in_young(r->hrm_index());
609 609 }
610 610 inline void register_region_with_region_attr(HeapRegion* r);
611 611 inline void register_old_region_with_region_attr(HeapRegion* r);
612 612 inline void register_optional_region_with_region_attr(HeapRegion* r);
613 613
614 614 void clear_region_attr(const HeapRegion* hr) {
615 615 _region_attr.clear(hr);
616 616 }
617 617
618 618 void clear_region_attr() {
619 619 _region_attr.clear();
620 620 }
621 621
622 622 // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
623 623 // for all regions.
624 624 void verify_region_attr_remset_update() PRODUCT_RETURN;
625 625
626 626 bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
627 627
628 628 // This is called at the start of either a concurrent cycle or a Full
629 629 // GC to update the number of old marking cycles started.
630 630 void increment_old_marking_cycles_started();
631 631
632 632 // This is called at the end of either a concurrent cycle or a Full
633 633 // GC to update the number of old marking cycles completed. Those two
634 634 // can happen in a nested fashion, i.e., we start a concurrent
635 635 // cycle, a Full GC happens half-way through it which ends first,
636 636 // and then the cycle notices that a Full GC happened and ends
637 637 // too. The concurrent parameter is a boolean to help us do a bit
638 638 // tighter consistency checking in the method. If concurrent is
639 639 // false, the caller is the inner caller in the nesting (i.e., the
640 640 // Full GC). If concurrent is true, the caller is the outer caller
641 641 // in this nesting (i.e., the concurrent cycle). Further nesting is
642 642 // not currently supported. The end of this call also notifies
643 643 // the G1OldGCCount_lock in case a Java thread is waiting for a full
644 644 // GC to happen (e.g., it called System.gc() with
645 645 // +ExplicitGCInvokesConcurrent).
646 646 void increment_old_marking_cycles_completed(bool concurrent);
647 647
648 648 uint old_marking_cycles_completed() {
649 649 return _old_marking_cycles_completed;
650 650 }
651 651
652 652 G1HRPrinter* hr_printer() { return &_hr_printer; }
653 653
654 654 // Allocates a new heap region instance.
655 655 HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
656 656
657 657 // Allocate the highest free region in the reserved heap. This will commit
658 658 // regions as necessary.
659 659 HeapRegion* alloc_highest_free_region();
660 660
661 661 // Frees a region by resetting its metadata and adding it to the free list
662 662 // passed as a parameter (this is usually a local list which will be appended
663 663 // to the master free list later or NULL if free list management is handled
664 664 // in another way).
665 665 // Callers must ensure they are the only one calling free on the given region
666 666 // at the same time.
667 667 void free_region(HeapRegion* hr, FreeRegionList* free_list);
668 668
669 669 // It dirties the cards that cover the block so that the post
670 670 // write barrier never queues anything when updating objects on this
671 671 // block. It is assumed (and in fact we assert) that the block
672 672 // belongs to a young region.
673 673 inline void dirty_young_block(HeapWord* start, size_t word_size);
674 674
675 675 // Frees a humongous region by collapsing it into individual regions
676 676 // and calling free_region() for each of them. The freed regions
677 677 // will be added to the free list that's passed as a parameter (this
678 678 // is usually a local list which will be appended to the master free
679 679 // list later).
680 680 // The method assumes that only a single thread is ever calling
681 681 // this for a particular region at once.
682 682 void free_humongous_region(HeapRegion* hr,
683 683 FreeRegionList* free_list);
684 684
685 685 // Facility for allocating in 'archive' regions in high heap memory and
686 686 // recording the allocated ranges. These should all be called from the
687 687 // VM thread at safepoints, without the heap lock held. They can be used
688 688 // to create and archive a set of heap regions which can be mapped at the
689 689 // same fixed addresses in a subsequent JVM invocation.
690 690 void begin_archive_alloc_range(bool open = false);
691 691
692 692 // Check if the requested size would be too large for an archive allocation.
693 693 bool is_archive_alloc_too_large(size_t word_size);
694 694
695 695 // Allocate memory of the requested size from the archive region. This will
696 696 // return NULL if the size is too large or if no memory is available. It
697 697 // does not trigger a garbage collection.
698 698 HeapWord* archive_mem_allocate(size_t word_size);
699 699
700 700 // Optionally aligns the end address and returns the allocated ranges in
701 701 // an array of MemRegions in order of ascending addresses.
702 702 void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
703 703 size_t end_alignment_in_bytes = 0);
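// [Editor's note] Illustrative call sequence for the archive allocation
// facility above, derived from the comments in this header (assumed usage,
// error handling omitted; 'g1h' and 'word_size' are placeholders):
//
//   g1h->begin_archive_alloc_range();
//   HeapWord* p = g1h->archive_mem_allocate(word_size);  // NULL if it does not fit
//   GrowableArray<MemRegion> ranges;
//   g1h->end_archive_alloc_range(&ranges);               // default alignment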
704 704
705 705 // Facility for allocating a fixed range within the heap and marking
706 706 // the containing regions as 'archive'. For use at JVM init time, when the
707 707 // caller may mmap archived heap data at the specified range(s).
708 708 // Verify that the MemRegions specified in the argument array are within the
709 709 // reserved heap.
710 710 bool check_archive_addresses(MemRegion* range, size_t count);
711 711
712 712 // Commit the appropriate G1 regions containing the specified MemRegions
713 713 // and mark them as 'archive' regions. The regions in the array must be
714 714 // non-overlapping and in order of ascending address.
715 715 bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
716 716
717 717 // Insert any required filler objects in the G1 regions around the specified
718 718 // ranges to make the regions parseable. This must be called after
719 719 // alloc_archive_regions, and after class loading has occurred.
720 720 void fill_archive_regions(MemRegion* range, size_t count);
721 721
722 722 // For each of the specified MemRegions, uncommit the containing G1 regions
723 723 // which had been allocated by alloc_archive_regions. This should be called
724 724 // rather than fill_archive_regions at JVM init time if the archive file
725 725 // mapping failed, with the same non-overlapping and sorted MemRegion array.
726 726 void dealloc_archive_regions(MemRegion* range, size_t count);
727 727
728 728 oop materialize_archived_object(oop obj);
729 729
730 730 private:
731 731
732 732 // Shrink the garbage-first heap by at most the given size (in bytes!).
733 733 // (Rounds down to a HeapRegion boundary.)
734 734 void shrink(size_t expand_bytes);
735 735 void shrink_helper(size_t expand_bytes);
736 736
737 737 #if TASKQUEUE_STATS
738 738 static void print_taskqueue_stats_hdr(outputStream* const st);
739 739 void print_taskqueue_stats() const;
740 740 void reset_taskqueue_stats();
741 741 #endif // TASKQUEUE_STATS
742 742
743 743 // Schedule the VM operation that will do an evacuation pause to
744 744 // satisfy an allocation request of word_size. *succeeded will
745 745 // return whether the VM operation was successful (it did do an
746 746 // evacuation pause) or not (another thread beat us to it or the GC
747 747 // locker was active). Given that we should not be holding the
748 748 // Heap_lock when we enter this method, we will pass the
749 749 // gc_count_before (i.e., total_collections()) as a parameter since
750 750 // it has to be read while holding the Heap_lock. Currently, both
751 751 // methods that call do_collection_pause() release the Heap_lock
752 752 // before the call, so it's easy to read gc_count_before just before.
753 753 HeapWord* do_collection_pause(size_t word_size,
754 754 uint gc_count_before,
755 755 bool* succeeded,
756 756 GCCause::Cause gc_cause);
757 757
758 758 void wait_for_root_region_scanning();
759 759
760 760 // The guts of the incremental collection pause, executed by the vm
761 761 // thread. It returns false if it is unable to do the collection due
762 762 // to the GC locker being active, true otherwise
763 763 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
764 764
765 765 G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
766 766 void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
767 767 void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
768 768
769 769 void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
770 770
771 771 // Actually do the work of evacuating the parts of the collection set.
772 772 void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
773 773 void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
774 774 private:
775 775 // Evacuate the next set of optional regions.
776 776 void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
777 777
778 778 public:
779 779 void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
780 780 void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
781 781 G1RedirtyCardsQueueSet* rdcqs,
782 782 G1ParScanThreadStateSet* pss);
783 783
784 - void expand_heap_after_young_collection();
784 + void resize_heap_after_young_collection();
785 + bool expand_heap_after_young_collection();
786 + void shrink_heap_after_young_collection();
787 + void expand_heap_after_concurrent_mark();
788 +
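// [Editor's note] The declarations above are the new resizing hooks this
// change introduces so that -XX:SoftMaxHeapSize can guide the sizing
// heuristics after a young collection and after concurrent mark. A minimal
// sketch of how they could fit together (assumed shape only; the real logic
// lives in g1CollectedHeap.cpp and the policy code):
//
//   void G1CollectedHeap::resize_heap_after_young_collection() {
//     if (!expand_heap_after_young_collection()) {
//       // No expansion happened; consider giving memory back when the
//       // committed size is above the soft maximum capacity.
//       if (capacity() > soft_max_capacity()) {
//         shrink_heap_after_young_collection();
//       }
//     }
//   }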
785 789 // Update object copying statistics.
786 790 void record_obj_copy_mem_stats();
787 791
788 792 // The hot card cache for remembered set insertion optimization.
789 793 G1HotCardCache* _hot_card_cache;
790 794
791 795 // The g1 remembered set of the heap.
792 796 G1RemSet* _rem_set;
793 797
794 798 // After a collection pause, convert the regions in the collection set into free
795 799 // regions.
796 800 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
797 801
798 802 // Abandon the current collection set without recording policy
799 803 // statistics or updating free lists.
800 804 void abandon_collection_set(G1CollectionSet* collection_set);
801 805
802 806 // The concurrent marker (and the thread it runs in.)
803 807 G1ConcurrentMark* _cm;
804 808 G1ConcurrentMarkThread* _cm_thread;
805 809
806 810 // The concurrent refiner.
807 811 G1ConcurrentRefine* _cr;
808 812
809 813 // The parallel task queues
810 814 RefToScanQueueSet *_task_queues;
811 815
812 816 // True iff an evacuation has failed in the current collection.
813 817 bool _evacuation_failed;
814 818
815 819 EvacuationFailedInfo* _evacuation_failed_info_array;
816 820
817 821 // Failed evacuations cause some logical from-space objects to have
818 822 // forwarding pointers to themselves. Reset them.
819 823 void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);
820 824
821 825 // Restore the objects in the regions in the collection set after an
822 826 // evacuation failure.
823 827 void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);
824 828
825 829 PreservedMarksSet _preserved_marks_set;
826 830
827 831 // Preserve the mark of "obj", if necessary, in preparation for its mark
828 832 // word being overwritten with a self-forwarding-pointer.
829 833 void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
830 834
831 835 #ifndef PRODUCT
832 836 // Support for forcing evacuation failures. Analogous to
833 837 // PromotionFailureALot for the other collectors.
834 838
835 839 // Records whether G1EvacuationFailureALot should be in effect
836 840 // for the current GC
837 841 bool _evacuation_failure_alot_for_current_gc;
838 842
839 843 // Used to record the GC number for interval checking when
840 844 // determining whether G1EvacuationFailureALot is in effect
841 845 // for the current GC.
842 846 size_t _evacuation_failure_alot_gc_number;
843 847
844 848 // Count of the number of evacuations between failures.
845 849 volatile size_t _evacuation_failure_alot_count;
846 850
847 851 // Set whether G1EvacuationFailureALot should be in effect
848 852 // for the current GC (based upon the type of GC and which
849 853 // command line flags are set);
850 854 inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
851 855 bool during_initial_mark,
852 856 bool mark_or_rebuild_in_progress);
853 857
854 858 inline void set_evacuation_failure_alot_for_current_gc();
855 859
856 860 // Return true if it's time to cause an evacuation failure.
857 861 inline bool evacuation_should_fail();
858 862
859 863 // Reset the G1EvacuationFailureALot counters. Should be called at
860 864 // the end of an evacuation pause in which an evacuation failure occurred.
861 865 inline void reset_evacuation_should_fail();
862 866 #endif // !PRODUCT
863 867
864 868 // ("Weak") Reference processing support.
865 869 //
866 870 // G1 has 2 instances of the reference processor class. One
867 871 // (_ref_processor_cm) handles reference object discovery
868 872 // and subsequent processing during concurrent marking cycles.
869 873 //
870 874 // The other (_ref_processor_stw) handles reference object
871 875 // discovery and processing during full GCs and incremental
872 876 // evacuation pauses.
873 877 //
874 878 // During an incremental pause, reference discovery will be
875 879 // temporarily disabled for _ref_processor_cm and will be
876 880 // enabled for _ref_processor_stw. At the end of the evacuation
877 881 // pause references discovered by _ref_processor_stw will be
878 882 // processed and discovery will be disabled. The previous
879 883 // setting for reference object discovery for _ref_processor_cm
880 884 // will be re-instated.
881 885 //
882 886 // At the start of marking:
883 887 // * Discovery by the CM ref processor is verified to be inactive
884 888 // and its discovered lists are empty.
885 889 // * Discovery by the CM ref processor is then enabled.
886 890 //
887 891 // At the end of marking:
888 892 // * Any references on the CM ref processor's discovered
889 893 // lists are processed (possibly MT).
890 894 //
891 895 // At the start of full GC we:
892 896 // * Disable discovery by the CM ref processor and
893 897 // empty CM ref processor's discovered lists
894 898 // (without processing any entries).
895 899 // * Verify that the STW ref processor is inactive and its
896 900 // discovered lists are empty.
897 901 // * Temporarily set STW ref processor discovery as single threaded.
898 902 // * Temporarily clear the STW ref processor's _is_alive_non_header
899 903 // field.
900 904 // * Finally enable discovery by the STW ref processor.
901 905 //
902 906 // The STW ref processor is used to record any discovered
903 907 // references during the full GC.
904 908 //
905 909 // At the end of a full GC we:
906 910 // * Enqueue any reference objects discovered by the STW ref processor
907 911 // that have non-live referents. This has the side-effect of
908 912 // making the STW ref processor inactive by disabling discovery.
909 913 // * Verify that the CM ref processor is still inactive
910 914 // and no references have been placed on its discovered
911 915 // lists (also checked as a precondition during initial marking).
912 916
913 917 // The (stw) reference processor...
914 918 ReferenceProcessor* _ref_processor_stw;
915 919
916 920 // During reference object discovery, the _is_alive_non_header
917 921 // closure (if non-null) is applied to the referent object to
918 922 // determine whether the referent is live. If so then the
919 923 // reference object does not need to be 'discovered' and can
920 924 // be treated as a regular oop. This has the benefit of reducing
921 925 // the number of 'discovered' reference objects that need to
922 926 // be processed.
923 927 //
924 928 // Instance of the is_alive closure for embedding into the
925 929 // STW reference processor as the _is_alive_non_header field.
926 930 // Supplying a value for the _is_alive_non_header field is
927 931 // optional but doing so prevents unnecessary additions to
928 932 // the discovered lists during reference discovery.
929 933 G1STWIsAliveClosure _is_alive_closure_stw;
930 934
931 935 G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
932 936
933 937 // The (concurrent marking) reference processor...
934 938 ReferenceProcessor* _ref_processor_cm;
935 939
936 940 // Instance of the concurrent mark is_alive closure for embedding
937 941 // into the Concurrent Marking reference processor as the
938 942 // _is_alive_non_header field. Supplying a value for the
939 943 // _is_alive_non_header field is optional but doing so prevents
940 944 // unnecessary additions to the discovered lists during reference
941 945 // discovery.
942 946 G1CMIsAliveClosure _is_alive_closure_cm;
943 947
944 948 G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
945 949 public:
946 950
947 951 RefToScanQueue *task_queue(uint i) const;
948 952
949 953 uint num_task_queues() const;
950 954
951 955 // Create a G1CollectedHeap.
952 956 // Must call the initialize method afterwards.
953 957 // May not return if something goes wrong.
954 958 G1CollectedHeap();
955 959
956 960 private:
957 961 jint initialize_concurrent_refinement();
958 962 jint initialize_young_gen_sampling_thread();
959 963 public:
960 964 // Initialize the G1CollectedHeap to have the initial and
961 965 // maximum sizes and remembered and barrier sets
962 966 // specified by the policy object.
963 967 jint initialize();
964 968
965 969 virtual void stop();
966 970 virtual void safepoint_synchronize_begin();
967 971 virtual void safepoint_synchronize_end();
968 972
969 973 // Does operations required after initialization has been done.
970 974 void post_initialize();
971 975
972 976 // Initialize weak reference processing.
973 977 void ref_processing_init();
974 978
975 979 virtual Name kind() const {
976 980 return CollectedHeap::G1;
977 981 }
978 982
979 983 virtual const char* name() const {
980 984 return "G1";
981 985 }
982 986
983 987 const G1CollectorState* collector_state() const { return &_collector_state; }
984 988 G1CollectorState* collector_state() { return &_collector_state; }
985 989
986 990 // The current policy object for the collector.
987 991 G1Policy* policy() const { return _policy; }
988 992 // The remembered set.
989 993 G1RemSet* rem_set() const { return _rem_set; }
990 994
991 995 inline G1GCPhaseTimes* phase_times() const;
992 996
993 997 HeapRegionManager* hrm() const { return _hrm; }
994 998
995 999 const G1CollectionSet* collection_set() const { return &_collection_set; }
996 1000 G1CollectionSet* collection_set() { return &_collection_set; }
997 1001
998 1002 virtual SoftRefPolicy* soft_ref_policy();
999 1003
1000 1004 virtual void initialize_serviceability();
1001 1005 virtual MemoryUsage memory_usage();
1002 1006 virtual GrowableArray<GCMemoryManager*> memory_managers();
1003 1007 virtual GrowableArray<MemoryPool*> memory_pools();
1004 1008
1005 1009 // Try to minimize the remembered set.
1006 1010 void scrub_rem_set();
1007 1011
1008 1012 // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1009 1013 void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
1010 1014
1011 1015 // The shared block offset table array.
1012 1016 G1BlockOffsetTable* bot() const { return _bot; }
1013 1017
1014 1018 // Reference Processing accessors
1015 1019
1016 1020 // The STW reference processor....
1017 1021 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1018 1022
1019 1023 G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1020 1024
1021 1025 // The Concurrent Marking reference processor...
1022 1026 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1023 1027
1024 1028 size_t unused_committed_regions_in_bytes() const;
1025 1029
1026 1030 virtual size_t capacity() const;
1027 1031 virtual size_t used() const;
1028 1032 // This should be called when we're not holding the heap lock. The
1029 1033 // result might be a bit inaccurate.
1030 1034 size_t used_unlocked() const;
1031 1035 size_t recalculate_used() const;
1032 1036
1033 1037 // These virtual functions do the actual allocation.
1034 1038 // Some heaps may offer a contiguous region for shared non-blocking
1035 1039 // allocation, via inlined code (by exporting the address of the top and
1036 1040 // end fields defining the extent of the contiguous allocation region.)
1037 1041 // But G1CollectedHeap doesn't yet support this.
1038 1042
1039 1043 virtual bool is_maximal_no_gc() const {
1040 1044 return _hrm->available() == 0;
1041 1045 }
1042 1046
1043 1047 // Returns whether there are any regions left in the heap for allocation.
1044 1048 bool has_regions_left_for_allocation() const {
1045 1049 return !is_maximal_no_gc() || num_free_regions() != 0;
1046 1050 }
1047 1051
1048 1052 // The current number of regions in the heap.
1049 1053 uint num_regions() const { return _hrm->length(); }
1050 1054
1051 1055 // The max number of regions in the heap.
1052 1056 uint max_regions() const { return _hrm->max_length(); }
1053 1057
1054 1058 // Max number of regions that can be committed.
1055 1059 uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
1056 1060
1057 1061 // The number of regions that are completely free.
1058 1062 uint num_free_regions() const { return _hrm->num_free_regions(); }
1059 1063
1060 1064 // The number of regions that can be allocated into.
1061 1065 uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1062 1066
1063 1067 MemoryUsage get_auxiliary_data_memory_usage() const {
1064 1068 return _hrm->get_auxiliary_data_memory_usage();
1065 1069 }
1066 1070
1067 1071 // The number of regions that are not completely free.
1068 1072 uint num_used_regions() const { return num_regions() - num_free_regions(); }
1069 1073
1070 1074 #ifdef ASSERT
1071 1075 bool is_on_master_free_list(HeapRegion* hr) {
1072 1076 return _hrm->is_free(hr);
1073 1077 }
1074 1078 #endif // ASSERT
1075 1079
1076 1080 inline void old_set_add(HeapRegion* hr);
1077 1081 inline void old_set_remove(HeapRegion* hr);
1078 1082
1079 1083 inline void archive_set_add(HeapRegion* hr);
1080 1084
1081 - size_t non_young_capacity_bytes() {
1085 + size_t non_young_capacity_bytes() const {
1082 1086 return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1083 1087 }
1084 1088
1085 1089 // Determine whether the given region is one that we are using as an
1086 1090 // old GC alloc region.
1087 1091 bool is_old_gc_alloc_region(HeapRegion* hr);
1088 1092
1089 1093 // Perform a collection of the heap; intended for use in implementing
1090 1094 // "System.gc". This probably implies as full a collection as the
1091 1095 // "CollectedHeap" supports.
1092 1096 virtual void collect(GCCause::Cause cause);
1093 1097
1094 1098 // Perform a collection of the heap with the given cause.
1095 1099 // Returns whether this collection actually executed.
1096 1100 bool try_collect(GCCause::Cause cause);
1097 1101
1098 1102 // True iff an evacuation has failed in the most-recent collection.
1099 1103 bool evacuation_failed() { return _evacuation_failed; }
1100 1104
1101 1105 void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1102 1106 void prepend_to_freelist(FreeRegionList* list);
1103 1107 void decrement_summary_bytes(size_t bytes);
1104 1108
1105 1109 virtual bool is_in(const void* p) const;
1106 1110 #ifdef ASSERT
1107 1111 // Returns whether p is in one of the available areas of the heap. Slow but
1108 1112 // extensive version.
1109 1113 bool is_in_exact(const void* p) const;
1110 1114 #endif
1111 1115
1112 1116 // Return "TRUE" iff the given object address is within the collection
1113 1117 // set. Assumes that the reference points into the heap.
1114 1118 inline bool is_in_cset(const HeapRegion *hr);
1115 1119 inline bool is_in_cset(oop obj);
1116 1120 inline bool is_in_cset(HeapWord* addr);
1117 1121
1118 1122 inline bool is_in_cset_or_humongous(const oop obj);
1119 1123
1120 1124 private:
1121 1125 // This array is used for a quick test on whether a reference points into
1122 1126 // the collection set or not. Each of the array's elements denotes whether the
1123 1127 // corresponding region is in the collection set or not.
1124 1128 G1HeapRegionAttrBiasedMappedArray _region_attr;
1125 1129
1126 1130 public:
1127 1131
1128 1132 inline G1HeapRegionAttr region_attr(const void* obj) const;
1129 1133 inline G1HeapRegionAttr region_attr(uint idx) const;
1130 1134
1131 1135 // Return "TRUE" iff the given object address is in the reserved
1132 1136 // region of g1.
1133 1137 bool is_in_g1_reserved(const void* p) const {
1134 1138 return _hrm->reserved().contains(p);
1135 1139 }
1136 1140
1137 1141 // Returns a MemRegion that corresponds to the space that has been
1138 1142 // reserved for the heap
1139 1143 MemRegion g1_reserved() const {
1140 1144 return _hrm->reserved();
1141 1145 }
1142 1146
1143 1147 MemRegion reserved_region() const {
1144 1148 return _reserved;
1145 1149 }
1146 1150
1147 1151 HeapWord* base() const {
1148 1152 return _reserved.start();
1149 1153 }
1150 1154
1151 1155 bool is_in_reserved(const void* addr) const {
1152 1156 return _reserved.contains(addr);
1153 1157 }
1154 1158
1155 1159 G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1156 1160
1157 1161 G1CardTable* card_table() const {
1158 1162 return _card_table;
1159 1163 }
1160 1164
1161 1165 // Iteration functions.
1162 1166
1163 1167 // Iterate over all objects, calling "cl.do_object" on each.
1164 1168 virtual void object_iterate(ObjectClosure* cl);
1165 1169
1166 1170 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1167 1171 virtual void keep_alive(oop obj);
1168 1172
1169 1173 // Iterate over heap regions, in address order, terminating the
1170 1174 // iteration early if the "do_heap_region" method returns "true".
1171 1175 void heap_region_iterate(HeapRegionClosure* blk) const;
1172 1176
1173 1177 // Return the region with the given index. It assumes the index is valid.
1174 1178 inline HeapRegion* region_at(uint index) const;
1175 1179 inline HeapRegion* region_at_or_null(uint index) const;
1176 1180
1177 1181 // Return the next region (by index) that is part of the same
1178 1182 // humongous object that hr is part of.
1179 1183 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1180 1184
1181 1185 // Calculate the region index of the given address. Given address must be
1182 1186 // within the heap.
1183 1187 inline uint addr_to_region(HeapWord* addr) const;
1184 1188
1185 1189 inline HeapWord* bottom_addr_for_region(uint index) const;
1186 1190
1187 1191 // Two functions to iterate over the heap regions in parallel. Threads
1188 1192 // compete using the HeapRegionClaimer to claim the regions before
1189 1193 // applying the closure on them.
1190 1194 // The _from_worker_offset version uses the HeapRegionClaimer and
1191 1195 // the worker id to calculate a start offset to prevent all workers from
1192 1196 // starting at the same point.
1193 1197 void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1194 1198 HeapRegionClaimer* hrclaimer,
1195 1199 uint worker_id) const;
1196 1200
1197 1201 void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1198 1202 HeapRegionClaimer* hrclaimer) const;
1199 1203
1200 1204 // Iterate over all regions in the collection set in parallel.
1201 1205 void collection_set_par_iterate_all(HeapRegionClosure* cl,
1202 1206 HeapRegionClaimer* hr_claimer,
1203 1207 uint worker_id);
1204 1208
1205 1209 // Iterate over all regions currently in the current collection set.
1206 1210 void collection_set_iterate_all(HeapRegionClosure* blk);
1207 1211
1208 1212 // Iterate over the regions in the current increment of the collection set.
1209 1213 // Starts the iteration so that the start regions of a given worker id over the
1210 1214 // set active_workers are evenly spread across the set of collection set regions
1211 1215 // to be iterated.
1212 1216 // The variant with the HeapRegionClaimer guarantees that the closure will be
1213 1217 // applied to a particular region exactly once.
1214 1218 void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1215 1219 collection_set_iterate_increment_from(blk, NULL, worker_id);
1216 1220 }
1217 1221 void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1218 1222
1219 1223 // Returns the HeapRegion that contains addr. addr must not be NULL.
1220 1224 template <class T>
1221 1225 inline HeapRegion* heap_region_containing(const T addr) const;
1222 1226
1223 1227 // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1224 1228 // region. addr must not be NULL.
1225 1229 template <class T>
1226 1230 inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1227 1231
1228 1232 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1229 1233 // each address in the (reserved) heap is a member of exactly
1230 1234 // one block. The defining characteristic of a block is that it is
1231 1235 // possible to find its size, and thus to progress forward to the next
1232 1236 // block. (Blocks may be of different sizes.) Thus, blocks may
1233 1237 // represent Java objects, or they might be free blocks in a
1234 1238 // free-list-based heap (or subheap), as long as the two kinds are
1235 1239 // distinguishable and the size of each is determinable.
1236 1240
1237 1241 // Returns the address of the start of the "block" that contains the
1238 1242 // address "addr". We say "blocks" instead of "object" since some heaps
1239 1243 // may not pack objects densely; a chunk may either be an object or a
1240 1244 // non-object.
1241 1245 HeapWord* block_start(const void* addr) const;
1242 1246
1243 1247 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1244 1248 // the block is an object.
1245 1249 bool block_is_obj(const HeapWord* addr) const;
1246 1250
1247 1251 // Section on thread-local allocation buffers (TLABs)
1248 1252 // See CollectedHeap for semantics.
1249 1253
1250 1254 bool supports_tlab_allocation() const;
1251 1255 size_t tlab_capacity(Thread* ignored) const;
1252 1256 size_t tlab_used(Thread* ignored) const;
1253 1257 size_t max_tlab_size() const;
1254 1258 size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1255 1259
1256 1260 inline bool is_in_young(const oop obj);
1257 1261
1258 1262 // Returns "true" iff the given word_size is "very large".
1259 1263 static bool is_humongous(size_t word_size) {
1260 1264 // Note this has to be strictly greater-than as the TLABs
1261 1265 // are capped at the humongous threshold and we want to
1262 1266 // ensure that we don't try to allocate a TLAB as
1263 1267 // humongous and that we don't allocate a humongous
1264 1268 // object in a TLAB.
1265 1269 return word_size > _humongous_object_threshold_in_words;
1266 1270 }
1267 1271
1268 1272 // Returns the humongous threshold for a specific region size
1269 1273 static size_t humongous_threshold_for(size_t region_size) {
1270 1274 return (region_size / 2);
1271 1275 }
1272 1276
1273 1277 // Returns the number of regions the humongous object of the given word size
1274 1278 // requires.
1275 1279 static size_t humongous_obj_size_in_regions(size_t word_size);
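A worked example, assuming 1 MB regions (131072 heap words on a 64-bit VM); the concrete numbers below only hold under that assumption.

    size_t region_words = 131072;                                              // 1 MB region
    size_t threshold = G1CollectedHeap::humongous_threshold_for(region_words); // 65536 words (512 KB)
    bool   huge      = G1CollectedHeap::is_humongous(threshold + 1);           // true: strictly greater-than
    // 300000 words span two full 131072-word regions plus a partial third:
    size_t regions   = G1CollectedHeap::humongous_obj_size_in_regions(300000); // 3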
1276 1280
1277 1281 // Return the maximum heap capacity.
1278 1282 virtual size_t max_capacity() const;
1279 1283
1280 1284 // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1281 1285 virtual size_t max_reserved_capacity() const;
1286 +
1287 + // Return the soft maximum heap capacity.
1288 + size_t soft_max_capacity() const;
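This hunk only adds the declaration; the definition lives in g1CollectedHeap.cpp and is not shown here. Purely as a hedged sketch, one plausible shape is to align the manageable SoftMaxHeapSize flag to the region size and clamp it between the minimum heap size and max_capacity(), so the heuristics never target a size the heap cannot actually reach.

    // Sketch only; the real definition in the patch may differ.
    size_t G1CollectedHeap::soft_max_capacity() const {
      size_t soft_max = align_down((size_t)SoftMaxHeapSize, HeapRegion::GrainBytes);
      return MAX2(MinHeapSize, MIN2(soft_max, max_capacity()));
    }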
1282 1289
1283 1290 virtual jlong millis_since_last_gc();
1284 1291
1285 1292
1286 1293 // Convenience function to be used in situations where the heap type can be
1287 1294 // asserted to be this type.
1288 1295 static G1CollectedHeap* heap();
1289 1296
1290 1297 void set_region_short_lived_locked(HeapRegion* hr);
1291 1298 // add appropriate methods for any other surv rate groups
1292 1299
1293 1300 const G1SurvivorRegions* survivor() const { return &_survivor; }
1294 1301
1295 1302 uint eden_regions_count() const { return _eden.length(); }
1296 1303 uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1297 1304 uint survivor_regions_count() const { return _survivor.length(); }
1298 1305 uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1299 1306 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1300 1307 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1301 1308 uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1302 1309 uint old_regions_count() const { return _old_set.length(); }
1303 1310 uint archive_regions_count() const { return _archive_set.length(); }
1304 1311 uint humongous_regions_count() const { return _humongous_set.length(); }
1305 1312
1306 1313 #ifdef ASSERT
1307 1314 bool check_young_list_empty();
1308 1315 #endif
1309 1316
1310 1317 // *** Stuff related to concurrent marking. It's not clear to me that so
1311 1318 // many of these need to be public.
1312 1319
1313 1320 // The functions below are helper functions that a subclass of
1314 1321 // "CollectedHeap" can use in the implementation of its virtual
1315 1322 // functions.
1316 1323 // This performs a concurrent marking of the live objects in a
1317 1324 // bitmap off to the side.
1318 1325 void do_concurrent_mark();
1319 1326
1320 1327 bool is_marked_next(oop obj) const;
1321 1328
1322 1329 // Determine if an object is dead, given the object and also
1323 1330 // the region to which the object belongs. An object is dead
1324 1331 // iff a) it was not allocated since the last mark, b) it
1325 1332 // is not marked, and c) it is not in an archive region.
1326 1333 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1327 1334 return
1328 1335 hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1329 1336 !hr->is_archive();
1330 1337 }
1331 1338
1332 1339 // This function returns true when an object has been
1333 1340 // around since the previous marking and hasn't yet
1334 1341 // been marked during this marking, and is not in an archive region.
1335 1342 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1336 1343 return
1337 1344 !hr->obj_allocated_since_next_marking(obj) &&
1338 1345 !is_marked_next(obj) &&
1339 1346 !hr->is_archive();
1340 1347 }
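A short sketch of how callers typically combine the region-aware overload with heap_region_containing() (the helper name is hypothetical):

    static bool is_live(const G1CollectedHeap* g1h, oop obj) {
      if (obj == NULL) {
        return true;                      // NULL is never considered dead
      }
      const HeapRegion* hr = g1h->heap_region_containing(obj);
      return !g1h->is_obj_dead(obj, hr);
    }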
1341 1348
1342 1349 // Determine if an object is dead, given only the object itself.
1343 1350 // This will find the region to which the object belongs and
1344 1351 // then call the region version of the same function.
1345 1352
1346 1353 // Note: if the object is NULL it is not considered dead.
1347 1354
1348 1355 inline bool is_obj_dead(const oop obj) const;
1349 1356
1350 1357 inline bool is_obj_ill(const oop obj) const;
1351 1358
1352 1359 inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1353 1360 inline bool is_obj_dead_full(const oop obj) const;
1354 1361
1355 1362 G1ConcurrentMark* concurrent_mark() const { return _cm; }
1356 1363
1357 1364 // Refinement
1358 1365
1359 1366 G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1360 1367
1361 1368 // Optimized nmethod scanning support routines
1362 1369
1363 1370 // Register the given nmethod with the G1 heap.
1364 1371 virtual void register_nmethod(nmethod* nm);
1365 1372
1366 1373 // Unregister the given nmethod from the G1 heap.
1367 1374 virtual void unregister_nmethod(nmethod* nm);
1368 1375
1369 1376 // No nmethod flushing needed.
1370 1377 virtual void flush_nmethod(nmethod* nm) {}
1371 1378
1372 1379 // No nmethod verification implemented.
1373 1380 virtual void verify_nmethod(nmethod* nm) {}
1374 1381
1375 1382 // Free up superfluous code root memory.
1376 1383 void purge_code_root_memory();
1377 1384
1378 1385 // Rebuild the strong code root lists for each region
1379 1386 // after a full GC.
1380 1387 void rebuild_strong_code_roots();
1381 1388
1382 1389 // Partial cleaning of VM internal data structures.
1383 1390 void string_dedup_cleaning(BoolObjectClosure* is_alive,
1384 1391 OopClosure* keep_alive,
1385 1392 G1GCPhaseTimes* phase_times = NULL);
1386 1393
1387 1394 // Performs cleaning of data structures after class unloading.
1388 1395 void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1389 1396
1390 1397 // Redirty logged cards in the refinement queue.
1391 1398 void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);
1392 1399
1393 1400 // Verification
1394 1401
1395 1402 // Deduplicate the string
1396 1403 virtual void deduplicate_string(oop str);
1397 1404
1398 1405 // Perform any cleanup actions necessary before allowing a verification.
1399 1406 virtual void prepare_for_verify();
1400 1407
1401 1408 // Perform verification.
1402 1409
1403 1410 // vo == UsePrevMarking -> use "prev" marking information,
1404 1411 // vo == UseNextMarking -> use "next" marking information
1405 1412 // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
1406 1413 //
1407 1414 // NOTE: Only the "prev" marking information is guaranteed to be
1408 1415 // consistent most of the time, so most calls to this should use
1409 1416 // vo == UsePrevMarking.
1410 1417 // Currently, there is only one case where this is called with
1411 1418 // vo == UseNextMarking, which is to verify the "next" marking
1412 1419 // information at the end of remark.
1413 1420 // Currently there is only one place where this is called with
1414 1421 // vo == UseFullMarking, which is to verify the marking during a
1415 1422 // full GC.
1416 1423 void verify(VerifyOption vo);
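As a hedged usage sketch (assuming the usual VerifyOption_G1UsePrevMarking enumerator): verification run from a safepoint typically prepares the heap first and then verifies against the "prev" marking information, which, per the note above, is the only option that is reliably consistent.

    // From a safepoint, e.g. inside a VM operation:
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->prepare_for_verify();
    g1h->verify(VerifyOption_G1UsePrevMarking);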
1417 1424
1418 1425 // WhiteBox testing support.
1419 1426 virtual bool supports_concurrent_phase_control() const;
1420 1427 virtual bool request_concurrent_phase(const char* phase);
1421 1428 bool is_heterogeneous_heap() const;
1422 1429
1423 1430 virtual WorkGang* get_safepoint_workers() { return _workers; }
1424 1431
1425 1432 // The methods below are here for convenience and dispatch the
1426 1433 // appropriate method depending on value of the given VerifyOption
1427 1434 // parameter. The values for that parameter, and their meanings,
1428 1435 // are the same as those above.
1429 1436
1430 1437 bool is_obj_dead_cond(const oop obj,
1431 1438 const HeapRegion* hr,
1432 1439 const VerifyOption vo) const;
1433 1440
1434 1441 bool is_obj_dead_cond(const oop obj,
1435 1442 const VerifyOption vo) const;
1436 1443
1437 1444 G1HeapSummary create_g1_heap_summary();
1438 1445 G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1439 1446
1440 1447 // Printing
1441 1448 private:
1442 1449 void print_heap_regions() const;
1443 1450 void print_regions_on(outputStream* st) const;
1444 1451
1445 1452 public:
1446 1453 virtual void print_on(outputStream* st) const;
1447 1454 virtual void print_extended_on(outputStream* st) const;
1448 1455 virtual void print_on_error(outputStream* st) const;
1449 1456
1450 1457 virtual void print_gc_threads_on(outputStream* st) const;
1451 1458 virtual void gc_threads_do(ThreadClosure* tc) const;
1452 1459
1453 1460 // Override
1454 1461 void print_tracing_info() const;
1455 1462
1456 1463 // The following two methods are helpful for debugging RSet issues.
1457 1464 void print_cset_rsets() PRODUCT_RETURN;
1458 1465 void print_all_rsets() PRODUCT_RETURN;
1459 1466
1460 1467 // Used to print information about locations in the hs_err file.
1461 1468 virtual bool print_location(outputStream* st, void* addr) const;
1462 1469
1463 1470 size_t pending_card_num();
1464 1471 };
1465 1472
1466 1473 class G1ParEvacuateFollowersClosure : public VoidClosure {
1467 1474 private:
1468 1475 double _start_term;
1469 1476 double _term_time;
1470 1477 size_t _term_attempts;
1471 1478
1472 1479 void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1473 1480 void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1474 1481 protected:
1475 1482 G1CollectedHeap* _g1h;
1476 1483 G1ParScanThreadState* _par_scan_state;
1477 1484 RefToScanQueueSet* _queues;
1478 1485 ParallelTaskTerminator* _terminator;
1479 1486 G1GCPhaseTimes::GCParPhases _phase;
1480 1487
1481 1488 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
1482 1489 RefToScanQueueSet* queues() { return _queues; }
1483 1490 ParallelTaskTerminator* terminator() { return _terminator; }
1484 1491
1485 1492 public:
1486 1493 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1487 1494 G1ParScanThreadState* par_scan_state,
1488 1495 RefToScanQueueSet* queues,
1489 1496 ParallelTaskTerminator* terminator,
1490 1497 G1GCPhaseTimes::GCParPhases phase)
1491 1498 : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1492 1499 _g1h(g1h), _par_scan_state(par_scan_state),
1493 1500 _queues(queues), _terminator(terminator), _phase(phase) {}
1494 1501
1495 1502 void do_void();
1496 1503
1497 1504 double term_time() const { return _term_time; }
1498 1505 size_t term_attempts() const { return _term_attempts; }
1499 1506
1500 1507 private:
1501 1508 inline bool offer_termination();
1502 1509 };
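A hedged sketch of how this closure is typically driven inside a per-worker evacuation task; g1h, pss, queues and terminator stand for the per-thread scan state, task queue set and terminator owned by the surrounding task, none of which are shown here.

    // Inside a worker's evacuation step:
    G1ParEvacuateFollowersClosure evac(g1h, pss, queues, terminator,
                                       G1GCPhaseTimes::ObjCopy);
    evac.do_void();                               // drain queues, steal work, offer termination
    double term_ms  = evac.term_time() * 1000.0;  // os::elapsedTime() is in seconds
    size_t attempts = evac.term_attempts();       // useful for phase-time reporting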
1503 1510
1504 1511 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP