JDK-8236073 G1: Use SoftMaxHeapSize to guide GC heuristics
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
26 26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
27 27
28 28 #include "gc/g1/g1BarrierSet.hpp"
29 29 #include "gc/g1/g1BiasedArray.hpp"
30 30 #include "gc/g1/g1CardTable.hpp"
31 31 #include "gc/g1/g1CollectionSet.hpp"
32 32 #include "gc/g1/g1CollectorState.hpp"
33 33 #include "gc/g1/g1ConcurrentMark.hpp"
34 34 #include "gc/g1/g1EdenRegions.hpp"
35 35 #include "gc/g1/g1EvacFailure.hpp"
36 36 #include "gc/g1/g1EvacStats.hpp"
37 37 #include "gc/g1/g1EvacuationInfo.hpp"
38 38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 39 #include "gc/g1/g1HeapTransition.hpp"
40 40 #include "gc/g1/g1HeapVerifier.hpp"
41 41 #include "gc/g1/g1HRPrinter.hpp"
42 42 #include "gc/g1/g1HeapRegionAttr.hpp"
43 43 #include "gc/g1/g1MonitoringSupport.hpp"
44 44 #include "gc/g1/g1NUMA.hpp"
45 45 #include "gc/g1/g1RedirtyCardsQueue.hpp"
46 46 #include "gc/g1/g1SurvivorRegions.hpp"
47 47 #include "gc/g1/g1YCTypes.hpp"
48 48 #include "gc/g1/heapRegionManager.hpp"
49 49 #include "gc/g1/heapRegionSet.hpp"
50 50 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
51 51 #include "gc/shared/barrierSet.hpp"
52 52 #include "gc/shared/collectedHeap.hpp"
53 53 #include "gc/shared/gcHeapSummary.hpp"
54 54 #include "gc/shared/plab.hpp"
55 55 #include "gc/shared/preservedMarks.hpp"
56 56 #include "gc/shared/softRefPolicy.hpp"
57 57 #include "memory/memRegion.hpp"
58 58 #include "utilities/stack.hpp"
59 59
60 60 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
61 61 // It uses the "Garbage First" heap organization and algorithm, which
62 62 // may combine concurrent marking with parallel, incremental compaction of
63 63 // heap subsets that will yield large amounts of garbage.
64 64
65 65 // Forward declarations
66 66 class HeapRegion;
67 67 class GenerationSpec;
68 68 class G1ParScanThreadState;
69 69 class G1ParScanThreadStateSet;
70 70 class G1ParScanThreadState;
71 71 class MemoryPool;
72 72 class MemoryManager;
73 73 class ObjectClosure;
74 74 class SpaceClosure;
75 75 class CompactibleSpaceClosure;
76 76 class Space;
77 77 class G1CardTableEntryClosure;
78 78 class G1CollectionSet;
79 +class G1HeapSizingPolicy;
79 80 class G1Policy;
80 81 class G1HotCardCache;
81 82 class G1RemSet;
82 83 class G1YoungRemSetSamplingThread;
83 84 class G1ConcurrentMark;
84 85 class G1ConcurrentMarkThread;
85 86 class G1ConcurrentRefine;
86 87 class GenerationCounters;
87 88 class STWGCTimer;
88 89 class G1NewTracer;
89 90 class EvacuationFailedInfo;
90 91 class nmethod;
91 92 class WorkGang;
92 93 class G1Allocator;
93 94 class G1ArchiveAllocator;
94 95 class G1FullGCScope;
95 96 class G1HeapVerifier;
96 97 class G1HeapSizingPolicy;
97 98 class G1HeapSummary;
98 99 class G1EvacSummary;
99 100
100 101 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
101 102 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
102 103
103 104 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
104 105 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
105 106
106 107 // The G1 STW is alive closure.
107 108 // An instance is embedded into the G1CH and used as the
108 109 // (optional) _is_alive_non_header closure in the STW
109 110 // reference processor. It is also extensively used during
110 111 // reference processing during STW evacuation pauses.
111 112 class G1STWIsAliveClosure : public BoolObjectClosure {
112 113 G1CollectedHeap* _g1h;
113 114 public:
114 115 G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
115 116 bool do_object_b(oop p);
116 117 };
117 118
118 119 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
119 120 G1CollectedHeap* _g1h;
120 121 public:
121 122 G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
122 123 bool do_object_b(oop p);
123 124 };
124 125
125 126 class G1RegionMappingChangedListener : public G1MappingChangedListener {
126 127 private:
127 128 void reset_from_card_cache(uint start_idx, size_t num_regions);
128 129 public:
129 130 virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
130 131 };
131 132
132 133 class G1CollectedHeap : public CollectedHeap {
133 134 friend class VM_CollectForMetadataAllocation;
134 135 friend class VM_G1CollectForAllocation;
135 136 friend class VM_G1CollectFull;
136 137 friend class VM_G1TryInitiateConcMark;
137 138 friend class VMStructs;
138 139 friend class MutatorAllocRegion;
139 140 friend class G1FullCollector;
140 141 friend class G1GCAllocRegion;
141 142 friend class G1HeapVerifier;
142 143
143 144 // Closures used in implementation.
144 145 friend class G1ParScanThreadState;
145 146 friend class G1ParScanThreadStateSet;
146 147 friend class G1EvacuateRegionsTask;
147 148 friend class G1PLABAllocator;
148 149
149 150 // Other related classes.
150 151 friend class HeapRegionClaimer;
151 152
152 153 // Testing classes.
153 154 friend class G1CheckRegionAttrTableClosure;
154 155
155 156 private:
156 157 G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
157 158
158 159 WorkGang* _workers;
159 160 G1CardTable* _card_table;
160 161
161 162 SoftRefPolicy _soft_ref_policy;
162 163
163 164 static size_t _humongous_object_threshold_in_words;
164 165
165 166 // These sets keep track of old, archive and humongous regions respectively.
166 167 HeapRegionSet _old_set;
167 168 HeapRegionSet _archive_set;
168 169 HeapRegionSet _humongous_set;
169 170
170 171 void eagerly_reclaim_humongous_regions();
171 172 // Start a new incremental collection set for the next pause.
172 173 void start_new_collection_set();
173 174
174 175 // The block offset table for the G1 heap.
175 176 G1BlockOffsetTable* _bot;
176 177
177 178 // Tears down the region sets / lists so that they are empty and the
178 179 // regions on the heap do not belong to a region set / list. The
179 180 // only exception is the humongous set which we leave unaltered. If
180 181 // free_list_only is true, it will only tear down the master free
181 182 // list. It is called before a Full GC (free_list_only == false) or
182 183 // before heap shrinking (free_list_only == true).
183 184 void tear_down_region_sets(bool free_list_only);
184 185
185 186 // Rebuilds the region sets / lists so that they are repopulated to
186 187 // reflect the contents of the heap. The only exception is the
187 188 // humongous set which was not torn down in the first place. If
188 189 // free_list_only is true, it will only rebuild the master free
189 190 // list. It is called after a Full GC (free_list_only == false) or
190 191 // after heap shrinking (free_list_only == true).
191 192 void rebuild_region_sets(bool free_list_only);
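The free_list_only contract can be illustrated with a small standalone model; the printf calls below stand in for the real set and list operations and are not the actual implementation:

    #include <cstdio>

    // Toy model of the free_list_only parameter described above.
    static void tear_down_region_sets_model(bool free_list_only) {
      std::printf("tear down the master free list\n");
      if (!free_list_only) {
        // Full GC path: old and archive sets go too; the humongous set is
        // deliberately left unaltered, as the comment above explains.
        std::printf("tear down the old and archive region sets\n");
      }
    }

    int main() {
      tear_down_region_sets_model(false); // before a Full GC
      tear_down_region_sets_model(true);  // before heap shrinking
      return 0;
    }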
192 193
193 194 // Callback for region mapping changed events.
194 195 G1RegionMappingChangedListener _listener;
195 196
196 197 // Handle G1 NUMA support.
197 198 G1NUMA* _numa;
198 199
199 200 // The sequence of all heap regions in the heap.
200 201 HeapRegionManager* _hrm;
201 202
202 203 // Manages all allocations within regions except humongous object allocations.
203 204 G1Allocator* _allocator;
204 205
205 206 // Manages all heap verification.
206 207 G1HeapVerifier* _verifier;
207 208
208 209 // Outside of GC pauses, the number of bytes used in all regions other
209 210 // than the current allocation region(s).
210 211 volatile size_t _summary_bytes_used;
211 212
212 213 void increase_used(size_t bytes);
213 214 void decrease_used(size_t bytes);
214 215
215 216 void set_used(size_t bytes);
216 217
217 218 // Number of bytes used in all regions during GC. Typically changed when
218 219 // retiring a GC alloc region.
219 220 size_t _bytes_used_during_gc;
220 221
221 222 // Class that handles archive allocation ranges.
222 223 G1ArchiveAllocator* _archive_allocator;
223 224
224 225 // GC allocation statistics policy for survivors.
225 226 G1EvacStats _survivor_evac_stats;
226 227
227 228 // GC allocation statistics policy for tenured objects.
228 229 G1EvacStats _old_evac_stats;
229 230
230 231 // It specifies whether we should attempt to expand the heap after a
231 232 // region allocation failure. If heap expansion fails we set this to
232 233 // false so that we don't re-attempt the heap expansion (it's likely
233 234 // that subsequent expansion attempts will also fail if one fails).
234 235 // Currently, it is only consulted during GC and it's reset at the
235 236 // start of each GC.
236 237 bool _expand_heap_after_alloc_failure;
237 238
238 239 // Helper for monitoring and management support.
239 240 G1MonitoringSupport* _g1mm;
240 241
241 242 // Records whether the region at the given index is (still) a
242 243 // candidate for eager reclaim. Only valid for humongous start
243 244 // regions; other regions have unspecified values. Humongous start
244 245 // regions are initialized at start of collection pause, with
245 246 // candidates removed from the set as they are found reachable from
246 247 // roots or the young generation.
247 248 class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
248 249 protected:
249 250 bool default_value() const { return false; }
250 251 public:
251 252 void clear() { G1BiasedMappedArray<bool>::clear(); }
252 253 void set_candidate(uint region, bool value) {
253 254 set_by_index(region, value);
254 255 }
255 256 bool is_candidate(uint region) {
256 257 return get_by_index(region);
257 258 }
258 259 };
259 260
260 261 HumongousReclaimCandidates _humongous_reclaim_candidates;
261 262 // Stores whether during humongous object registration we found candidate regions.
262 263 // If not, we can skip a few steps.
263 264 bool _has_humongous_reclaim_candidates;
264 265
265 266 G1HRPrinter _hr_printer;
266 267
267 268 // Return true if an explicit GC should start a concurrent cycle instead
268 269 // of doing a STW full GC. A concurrent cycle should be started if:
269 270 // (a) cause == _g1_humongous_allocation,
270 271 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
271 272 // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
272 273 // (d) cause == _wb_conc_mark,
273 274 // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
274 275 bool should_do_concurrent_full_gc(GCCause::Cause cause);
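Rules (a) through (e) amount to a simple decision function. The following standalone sketch models it; the enum values and flag variables are stand-ins for the real GCCause values and -XX flags:

    // Stand-ins for GCCause::Cause values and the two -XX flags named above.
    enum class Cause { g1_humongous_allocation, java_lang_system_gc, dcmd_gc_run,
                       wb_conc_mark, g1_periodic_collection, allocation_failure };

    static bool ExplicitGCInvokesConcurrent_flag   = true; // -XX:+ExplicitGCInvokesConcurrent
    static bool G1PeriodicGCInvokesConcurrent_flag = true; // -XX:+G1PeriodicGCInvokesConcurrent

    // Mirrors rules (a)-(e) from the comment above.
    static bool should_do_concurrent_full_gc_model(Cause cause) {
      switch (cause) {
        case Cause::g1_humongous_allocation: return true;                               // (a)
        case Cause::java_lang_system_gc:     return ExplicitGCInvokesConcurrent_flag;   // (b)
        case Cause::dcmd_gc_run:             return ExplicitGCInvokesConcurrent_flag;   // (c)
        case Cause::wb_conc_mark:            return true;                               // (d)
        case Cause::g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent_flag; // (e)
        default:                             return false; // otherwise: STW full GC
      }
    }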
275 276
276 277 // Attempt to start a concurrent cycle with the indicated cause.
277 278 // precondition: should_do_concurrent_full_gc(cause)
278 279 bool try_collect_concurrently(GCCause::Cause cause,
279 280 uint gc_counter,
280 281 uint old_marking_started_before);
281 282
282 283 // Return true if should upgrade to full gc after an incremental one.
283 284 bool should_upgrade_to_full_gc(GCCause::Cause cause);
284 285
285 286 // indicates whether we are in young or mixed GC mode
286 287 G1CollectorState _collector_state;
287 288
288 289 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
289 290 // concurrent cycles) we have started.
290 291 volatile uint _old_marking_cycles_started;
291 292
292 293 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
293 294 // concurrent cycles) we have completed.
294 295 volatile uint _old_marking_cycles_completed;
295 296
296 297 // This is a non-product method that is helpful for testing. It is
297 298 // called at the end of a GC and artificially expands the heap by
298 299 // allocating a number of dead regions. This way we can induce very
299 300 // frequent marking cycles and stress the cleanup / concurrent
300 301 // cleanup code more (as all the regions that will be allocated by
301 302 // this method will be found dead by the marking cycle).
302 303 void allocate_dummy_regions() PRODUCT_RETURN;
303 304
304 305 // If the HR printer is active, dump the state of the regions in the
305 306 // heap after a compaction.
306 307 void print_hrm_post_compaction();
307 308
308 309 // Create a memory mapper for auxiliary data structures of the given size and
309 310 // translation factor.
310 311 static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
311 312 size_t size,
312 313 size_t translation_factor);
313 314
314 315 void trace_heap(GCWhen::Type when, const GCTracer* tracer);
315 316
316 317 // These are macros so that, if the assert fires, we get the correct
317 318 // line number, file, etc.
318 319
319 320 #define heap_locking_asserts_params(_extra_message_) \
320 321 "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
321 322 (_extra_message_), \
322 323 BOOL_TO_STR(Heap_lock->owned_by_self()), \
323 324 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
324 325 BOOL_TO_STR(Thread::current()->is_VM_thread())
325 326
326 327 #define assert_heap_locked() \
327 328 do { \
328 329 assert(Heap_lock->owned_by_self(), \
329 330 heap_locking_asserts_params("should be holding the Heap_lock")); \
330 331 } while (0)
331 332
332 333 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
333 334 do { \
334 335 assert(Heap_lock->owned_by_self() || \
335 336 (SafepointSynchronize::is_at_safepoint() && \
336 337 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
337 338 heap_locking_asserts_params("should be holding the Heap_lock or " \
338 339 "should be at a safepoint")); \
339 340 } while (0)
340 341
341 342 #define assert_heap_locked_and_not_at_safepoint() \
342 343 do { \
343 344 assert(Heap_lock->owned_by_self() && \
344 345 !SafepointSynchronize::is_at_safepoint(), \
345 346 heap_locking_asserts_params("should be holding the Heap_lock and " \
346 347 "should not be at a safepoint")); \
347 348 } while (0)
348 349
349 350 #define assert_heap_not_locked() \
350 351 do { \
351 352 assert(!Heap_lock->owned_by_self(), \
352 353 heap_locking_asserts_params("should not be holding the Heap_lock")); \
353 354 } while (0)
354 355
355 356 #define assert_heap_not_locked_and_not_at_safepoint() \
356 357 do { \
357 358 assert(!Heap_lock->owned_by_self() && \
358 359 !SafepointSynchronize::is_at_safepoint(), \
359 360 heap_locking_asserts_params("should not be holding the Heap_lock and " \
360 361 "should not be at a safepoint")); \
361 362 } while (0)
362 363
363 364 #define assert_at_safepoint_on_vm_thread() \
364 365 do { \
365 366 assert_at_safepoint(); \
366 367 assert(Thread::current_or_null() != NULL, "no current thread"); \
367 368 assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
368 369 } while (0)
369 370
370 371 #ifdef ASSERT
371 372 #define assert_used_and_recalculate_used_equal(g1h) \
372 373 do { \
373 374 size_t cur_used_bytes = g1h->used(); \
374 375 size_t recal_used_bytes = g1h->recalculate_used(); \
375 376 assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
376 377 " same as recalculated used(" SIZE_FORMAT ").", \
377 378 cur_used_bytes, recal_used_bytes); \
378 379 } while (0)
379 380 #else
380 381 #define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
381 382 #endif
382 383
383 384 const char* young_gc_name() const;
384 385
385 386 // The young region list.
386 387 G1EdenRegions _eden;
387 388 G1SurvivorRegions _survivor;
388 389
389 390 STWGCTimer* _gc_timer_stw;
390 391
391 392 G1NewTracer* _gc_tracer_stw;
392 393
393 394 // The current policy object for the collector.
394 395 G1Policy* _policy;
395 396 G1HeapSizingPolicy* _heap_sizing_policy;
396 397
397 398 G1CollectionSet _collection_set;
398 399
399 400 // Try to allocate a single non-humongous HeapRegion sufficient for
400 401 // an allocation of the given word_size. If do_expand is true,
401 402 // attempt to expand the heap if necessary to satisfy the allocation
402 403 // request. 'type' takes the type of region to be allocated. (Use constants
403 404 // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
404 405 HeapRegion* new_region(size_t word_size,
405 406 HeapRegionType type,
406 407 bool do_expand,
407 408 uint node_index = G1NUMA::AnyNodeIndex);
408 409
409 410 // Initialize a contiguous set of free regions of length num_regions
410 411 // and starting at index first so that they appear as a single
411 412 // humongous region.
412 413 HeapWord* humongous_obj_allocate_initialize_regions(uint first,
413 414 uint num_regions,
414 415 size_t word_size);
415 416
416 417 // Attempt to allocate a humongous object of the given size. Return
417 418 // NULL if unsuccessful.
418 419 HeapWord* humongous_obj_allocate(size_t word_size);
419 420
420 421 // The following two methods, allocate_new_tlab() and
421 422 // mem_allocate(), are the two main entry points from the runtime
422 423 // into the G1's allocation routines. They have the following
423 424 // assumptions:
424 425 //
425 426 // * They should both be called outside safepoints.
426 427 //
427 428 // * They should both be called without holding the Heap_lock.
428 429 //
429 430 // * All allocation requests for new TLABs should go to
430 431 // allocate_new_tlab().
431 432 //
432 433 // * All non-TLAB allocation requests should go to mem_allocate().
433 434 //
434 435 // * If either call cannot satisfy the allocation request using the
435 436 // current allocating region, they will try to get a new one. If
436 437 // this fails, they will attempt to do an evacuation pause and
437 438 // retry the allocation.
438 439 //
439 440 // * If all allocation attempts fail, even after trying to schedule
440 441 // an evacuation pause, allocate_new_tlab() will return NULL,
441 442 // whereas mem_allocate() will attempt a heap expansion and/or
442 443 // schedule a Full GC.
443 444 //
444 445 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
445 446 // should never be called with word_size being humongous. All
446 447 // humongous allocation requests should go to mem_allocate() which
447 448 // will satisfy them with a special path.
448 449
449 450 virtual HeapWord* allocate_new_tlab(size_t min_size,
450 451 size_t requested_size,
451 452 size_t* actual_size);
452 453
453 454 virtual HeapWord* mem_allocate(size_t word_size,
454 455 bool* gc_overhead_limit_was_exceeded);
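The contract above boils down to a dispatch rule. A toy model, with stand-in types and an illustrative humongous threshold rather than the real ones:

    #include <cassert>
    #include <cstddef>

    typedef void* HeapWordStub;  // stand-in for HeapWord*
    static const size_t humongous_threshold_words = 64 * 1024; // illustrative value

    // Toy dispatcher: TLAB refills must never be humongous; everything else,
    // including humongous objects, goes through the mem_allocate() path.
    static HeapWordStub runtime_allocate_model(size_t word_size, bool for_tlab) {
      if (for_tlab) {
        assert(word_size <= humongous_threshold_words && "no humongous TLABs");
        // would call allocate_new_tlab(min_size, requested_size, &actual_size);
        // NULL here would mean: no TLAB even after trying an evacuation pause.
        return NULL;
      }
      // would call mem_allocate(word_size, &overhead_exceeded), which may expand
      // the heap and/or schedule a Full GC before giving up.
      return NULL;
    }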
455 456
456 457 // First-level mutator allocation attempt: try to allocate out of
457 458 // the mutator alloc region without taking the Heap_lock. This
458 459 // should only be used for non-humongous allocations.
459 460 inline HeapWord* attempt_allocation(size_t min_word_size,
460 461 size_t desired_word_size,
461 462 size_t* actual_word_size);
462 463
463 464 // Second-level mutator allocation attempt: take the Heap_lock and
464 465 // retry the allocation attempt, potentially scheduling a GC
465 466 // pause. This should only be used for non-humongous allocations.
466 467 HeapWord* attempt_allocation_slow(size_t word_size);
467 468
468 469 // Takes the Heap_lock and attempts a humongous allocation. It can
469 470 // potentially schedule a GC pause.
470 471 HeapWord* attempt_allocation_humongous(size_t word_size);
471 472
472 473 // Allocation attempt that should be called during safepoints (e.g.,
473 474 // at the end of a successful GC). expect_null_mutator_alloc_region
474 475 // specifies whether the mutator alloc region is expected to be NULL
475 476 // or not.
476 477 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
477 478 bool expect_null_mutator_alloc_region);
478 479
479 480 // These methods are the "callbacks" from the G1AllocRegion class.
480 481
481 482 // For mutator alloc regions.
482 483 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
483 484 void retire_mutator_alloc_region(HeapRegion* alloc_region,
484 485 size_t allocated_bytes);
485 486
486 487 // For GC alloc regions.
487 488 bool has_more_regions(G1HeapRegionAttr dest);
488 489 HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
489 490 void retire_gc_alloc_region(HeapRegion* alloc_region,
490 491 size_t allocated_bytes, G1HeapRegionAttr dest);
491 492
492 493 // - if explicit_gc is true, the GC is for a System.gc() etc,
493 494 // otherwise it's for a failed allocation.
494 495 // - if clear_all_soft_refs is true, all soft references should be
495 496 // cleared during the GC.
496 497 // - it returns false if it is unable to do the collection due to the
497 498 // GC locker being active, true otherwise.
498 499 bool do_full_collection(bool explicit_gc,
499 500 bool clear_all_soft_refs);
500 501
501 502 // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
502 503 virtual void do_full_collection(bool clear_all_soft_refs);
503 504
504 505 // Callback from VM_G1CollectForAllocation operation.
505 506 // This function does everything necessary/possible to satisfy a
506 507 // failed allocation request (including collection, expansion, etc.)
507 508 HeapWord* satisfy_failed_allocation(size_t word_size,
508 509 bool* succeeded);
509 510 // Internal helpers used during full GC to split it up to
510 511 // increase readability.
511 512 void abort_concurrent_cycle();
512 513 void verify_before_full_collection(bool explicit_gc);
513 514 void prepare_heap_for_full_collection();
514 515 void prepare_heap_for_mutators();
515 516 void abort_refinement();
516 517 void verify_after_full_collection();
517 518 void print_heap_after_full_collection(G1HeapTransition* heap_transition);
518 519
519 520 // Helper method for satisfy_failed_allocation()
520 521 HeapWord* satisfy_failed_allocation_helper(size_t word_size,
521 522 bool do_gc,
522 523 bool clear_all_soft_refs,
523 524 bool expect_null_mutator_alloc_region,
524 525 bool* gc_succeeded);
525 526
526 527 // Attempt to expand the heap sufficiently
527 528 // to support an allocation of the given "word_size". If
528 529 // successful, perform the allocation and return the address of the
529 530 // allocated block, or else "NULL".
530 531 HeapWord* expand_and_allocate(size_t word_size);
531 532
532 533 // Process any reference objects discovered.
533 534 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
534 535
535 536 // During an initial mark pause we may install a pending list head which is
536 537 // not otherwise reachable; ensure that it is marked in the bitmap for
537 538 // concurrent marking to discover.
538 539 void make_pending_list_reachable();
539 540
540 541 // Merges the information gathered on a per-thread basis for all worker threads
541 542 // during GC into global variables.
542 543 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
543 544
544 545 void verify_numa_regions(const char* desc);
545 546
546 547 public:
547 548 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
548 549
549 550 WorkGang* workers() const { return _workers; }
550 551
551 552 // Runs the given AbstractGangTask with the current active workers, returning the
552 553 // total time taken.
553 554 Tickspan run_task(AbstractGangTask* task);
554 555
555 556 G1Allocator* allocator() {
556 557 return _allocator;
557 558 }
558 559
559 560 G1HeapVerifier* verifier() {
560 561 return _verifier;
561 562 }
562 563
563 564 G1MonitoringSupport* g1mm() {
564 565 assert(_g1mm != NULL, "should have been initialized");
565 566 return _g1mm;
566 567 }
567 568
568 - void resize_heap_if_necessary();
569 + void resize_heap_after_full_gc();
569 570
570 571 G1NUMA* numa() const { return _numa; }
571 572
572 573 // Expand the garbage-first heap by at least the given size (in bytes!).
573 574 // Returns true if the heap was expanded by the requested amount;
574 575 // false otherwise.
575 576 // (Rounds up to a HeapRegion boundary.)
576 577 bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
577 578 bool expand_single_region(uint node_index);
578 579
579 580 // Returns the PLAB statistics for a given destination.
580 581 inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
581 582
582 583 // Determines PLAB size for a given destination.
583 584 inline size_t desired_plab_sz(G1HeapRegionAttr dest);
584 585
585 586 // Do anything common to GC's.
586 587 void gc_prologue(bool full);
587 588 void gc_epilogue(bool full);
588 589
589 590 // Does the given region fulfill remembered set based eager reclaim candidate requirements?
590 591 bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
591 592
592 593 // Modify the reclaim candidate set and test for presence.
593 594 // These are only valid for starts_humongous regions.
594 595 inline void set_humongous_reclaim_candidate(uint region, bool value);
595 596 inline bool is_humongous_reclaim_candidate(uint region);
596 597 inline void set_has_humongous_reclaim_candidate(bool value);
597 598
598 599 // Remove from the reclaim candidate set. Also remove from the
599 600 // collection set so that later encounters avoid the slow path.
600 601 inline void set_humongous_is_live(oop obj);
601 602
602 603 // Register the given region to be part of the collection set.
603 604 inline void register_humongous_region_with_region_attr(uint index);
604 605
605 606 // We register a region with the fast "in collection set" test. We
606 607 // simply set to true the array slot corresponding to this region.
607 608 void register_young_region_with_region_attr(HeapRegion* r) {
608 609 _region_attr.set_in_young(r->hrm_index());
609 610 }
610 611 inline void register_region_with_region_attr(HeapRegion* r);
611 612 inline void register_old_region_with_region_attr(HeapRegion* r);
612 613 inline void register_optional_region_with_region_attr(HeapRegion* r);
613 614
614 615 void clear_region_attr(const HeapRegion* hr) {
615 616 _region_attr.clear(hr);
616 617 }
617 618
618 619 void clear_region_attr() {
619 620 _region_attr.clear();
620 621 }
621 622
622 623 // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
623 624 // for all regions.
624 625 void verify_region_attr_remset_update() PRODUCT_RETURN;
625 626
626 627 bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
627 628
628 629 // This is called at the start of either a concurrent cycle or a Full
629 630 // GC to update the number of old marking cycles started.
630 631 void increment_old_marking_cycles_started();
631 632
632 633 // This is called at the end of either a concurrent cycle or a Full
633 634 // GC to update the number of old marking cycles completed. Those two
634 635 // can happen in a nested fashion, i.e., we start a concurrent
635 636 // cycle, a Full GC happens half-way through it which ends first,
636 637 // and then the cycle notices that a Full GC happened and ends
637 638 // too. The concurrent parameter is a boolean to help us do a bit
638 639 // tighter consistency checking in the method. If concurrent is
639 640 // false, the caller is the inner caller in the nesting (i.e., the
640 641 // Full GC). If concurrent is true, the caller is the outer caller
641 642 // in this nesting (i.e., the concurrent cycle). Further nesting is
642 643 // not currently supported. The end of this call also notifies
643 644 // the G1OldGCCount_lock in case a Java thread is waiting for a full
644 645 // GC to happen (e.g., it called System.gc() with
645 646 // +ExplicitGCInvokesConcurrent).
646 647 void increment_old_marking_cycles_completed(bool concurrent);
647 648
648 649 uint old_marking_cycles_completed() {
649 650 return _old_marking_cycles_completed;
650 651 }
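The nested scenario described above can be traced with plain counters (a toy model; the real counters are the two volatile fields updated by the increment methods):

    #include <cassert>

    int main() {
      unsigned started = 0, completed = 0;
      started++;    // concurrent cycle starts
      started++;    // a Full GC starts half-way through the cycle
      completed++;  // the Full GC ends first (concurrent == false)
      completed++;  // the cycle notices the Full GC and ends too (concurrent == true)
      assert(started == 2 && completed == 2);
      return 0;
    }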
651 652
652 653 G1HRPrinter* hr_printer() { return &_hr_printer; }
653 654
654 655 // Allocates a new heap region instance.
655 656 HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
656 657
657 658 // Allocate the highest free region in the reserved heap. This will commit
658 659 // regions as necessary.
659 660 HeapRegion* alloc_highest_free_region();
660 661
661 662 // Frees a region by resetting its metadata and adding it to the free list
662 663 // passed as a parameter (this is usually a local list which will be appended
663 664 // to the master free list later or NULL if free list management is handled
664 665 // in another way).
665 666 // Callers must ensure they are the only one calling free on the given region
666 667 // at the same time.
667 668 void free_region(HeapRegion* hr, FreeRegionList* free_list);
668 669
669 670 // It dirties the cards that cover the block so that the post
670 671 // write barrier never queues anything when updating objects on this
671 672 // block. It is assumed (and in fact we assert) that the block
672 673 // belongs to a young region.
673 674 inline void dirty_young_block(HeapWord* start, size_t word_size);
674 675
675 676 // Frees a humongous region by collapsing it into individual regions
676 677 // and calling free_region() for each of them. The freed regions
677 678 // will be added to the free list that's passed as a parameter (this
678 679 // is usually a local list which will be appended to the master free
679 680 // list later).
680 681 // The method assumes that only a single thread is ever calling
681 682 // this for a particular region at once.
682 683 void free_humongous_region(HeapRegion* hr,
683 684 FreeRegionList* free_list);
684 685
685 686 // Facility for allocating in 'archive' regions in high heap memory and
686 687 // recording the allocated ranges. These should all be called from the
687 688 // VM thread at safepoints, without the heap lock held. They can be used
688 689 // to create and archive a set of heap regions which can be mapped at the
689 690 // same fixed addresses in a subsequent JVM invocation.
690 691 void begin_archive_alloc_range(bool open = false);
691 692
692 693 // Check if the requested size would be too large for an archive allocation.
693 694 bool is_archive_alloc_too_large(size_t word_size);
694 695
695 696 // Allocate memory of the requested size from the archive region. This will
696 697 // return NULL if the size is too large or if no memory is available. It
697 698 // does not trigger a garbage collection.
698 699 HeapWord* archive_mem_allocate(size_t word_size);
699 700
700 701 // Optionally aligns the end address and returns the allocated ranges in
701 702 // an array of MemRegions in order of ascending addresses.
702 703 void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
703 704 size_t end_alignment_in_bytes = 0);
704 705
705 706 // Facility for allocating a fixed range within the heap and marking
706 707 // the containing regions as 'archive'. For use at JVM init time, when the
707 708 // caller may mmap archived heap data at the specified range(s).
708 709 // Verify that the MemRegions specified in the argument array are within the
709 710 // reserved heap.
710 711 bool check_archive_addresses(MemRegion* range, size_t count);
711 712
712 713 // Commit the appropriate G1 regions containing the specified MemRegions
713 714 // and mark them as 'archive' regions. The regions in the array must be
714 715 // non-overlapping and in order of ascending address.
715 716 bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
716 717
717 718 // Insert any required filler objects in the G1 regions around the specified
718 719 // ranges to make the regions parseable. This must be called after
719 720 // alloc_archive_regions, and after class loading has occurred.
720 721 void fill_archive_regions(MemRegion* range, size_t count);
721 722
722 723 // For each of the specified MemRegions, uncommit the containing G1 regions
723 724 // which had been allocated by alloc_archive_regions. This should be called
724 725 // rather than fill_archive_regions at JVM init time if the archive file
725 726 // mapping failed, with the same non-overlapping and sorted MemRegion array.
726 727 void dealloc_archive_regions(MemRegion* range, size_t count);
727 728
728 729 oop materialize_archived_object(oop obj);
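A minimal call-order sketch of this facility, assuming this header is included; error handling and the actual initialization of archived objects are elided, and the helper name is hypothetical:

    // Hypothetical helper showing the intended call order (VM thread, safepoint).
    static void archive_one_object(G1CollectedHeap* g1, size_t word_size,
                                   GrowableArray<MemRegion>* ranges) {
      g1->begin_archive_alloc_range(/* open */ false);
      if (!g1->is_archive_alloc_too_large(word_size)) {
        HeapWord* p = g1->archive_mem_allocate(word_size);
        // ... copy/initialize the archived object at p (NULL means no memory) ...
        (void)p;
      }
      g1->end_archive_alloc_range(ranges, /* end_alignment_in_bytes */ 0);
    }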
729 730
730 731 private:
731 732
732 733 // Shrink the garbage-first heap by at most the given size (in bytes!).
733 734 // (Rounds down to a HeapRegion boundary.)
734 735 void shrink(size_t expand_bytes);
735 736 void shrink_helper(size_t expand_bytes);
736 737
737 738 #if TASKQUEUE_STATS
738 739 static void print_taskqueue_stats_hdr(outputStream* const st);
739 740 void print_taskqueue_stats() const;
740 741 void reset_taskqueue_stats();
741 742 #endif // TASKQUEUE_STATS
742 743
743 744 // Schedule the VM operation that will do an evacuation pause to
744 745 // satisfy an allocation request of word_size. *succeeded will
745 746 // return whether the VM operation was successful (it did do an
746 747 // evacuation pause) or not (another thread beat us to it or the GC
747 748 // locker was active). Given that we should not be holding the
748 749 // Heap_lock when we enter this method, we will pass the
749 750 // gc_count_before (i.e., total_collections()) as a parameter since
750 751 // it has to be read while holding the Heap_lock. Currently, both
751 752 // methods that call do_collection_pause() release the Heap_lock
752 753 // before the call, so it's easy to read gc_count_before just before.
753 754 HeapWord* do_collection_pause(size_t word_size,
754 755 uint gc_count_before,
755 756 bool* succeeded,
756 757 GCCause::Cause gc_cause);
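The gc_count_before handshake can be modeled with a plain mutex and counter (a sketch of the race-detection idea, not the real VM operation machinery):

    #include <mutex>

    static std::mutex heap_lock_model;        // stand-in for Heap_lock
    static unsigned total_collections_model;  // stand-in for total_collections()

    // Toy model: snapshot the count under the lock, drop the lock, then let the
    // "VM operation" decide whether another thread already did the pause.
    static bool do_collection_pause_model() {
      unsigned gc_count_before;
      {
        std::lock_guard<std::mutex> guard(heap_lock_model);
        gc_count_before = total_collections_model;
      } // Heap_lock released before scheduling the VM operation
      std::lock_guard<std::mutex> guard(heap_lock_model);
      if (total_collections_model != gc_count_before) {
        return false;               // another thread beat us to the pause
      }
      total_collections_model++;    // "perform" the evacuation pause
      return true;
    }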
757 758
758 759 void wait_for_root_region_scanning();
759 760
760 761 // The guts of the incremental collection pause, executed by the vm
761 762 // thread. It returns false if it is unable to do the collection due
762 763 // to the GC locker being active, true otherwise.
763 764 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
764 765
765 766 G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
766 767 void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
767 768 void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
768 769
769 770 void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
770 771
771 772 // Actually do the work of evacuating the parts of the collection set.
772 773 void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
773 774 void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
774 775 private:
775 776 // Evacuate the next set of optional regions.
776 777 void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
777 778
779 + bool expand_heap_after_young_collection();
780 + void shrink_heap_after_young_collection();
781 +
778 782 public:
779 783 void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
780 784 void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
781 785 G1RedirtyCardsQueueSet* rdcqs,
782 786 G1ParScanThreadStateSet* pss);
783 787
784 - void expand_heap_after_young_collection();
788 + void resize_heap_after_young_collection();
789 + void shrink_heap_after_concurrent_mark();
790 +
785 791 // Update object copying statistics.
786 792 void record_obj_copy_mem_stats();
787 793
788 794 // The hot card cache for remembered set insertion optimization.
789 795 G1HotCardCache* _hot_card_cache;
790 796
791 797 // The g1 remembered set of the heap.
792 798 G1RemSet* _rem_set;
793 799
794 800 // After a collection pause, convert the regions in the collection set into free
795 801 // regions.
796 802 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
797 803
798 804 // Abandon the current collection set without recording policy
799 805 // statistics or updating free lists.
800 806 void abandon_collection_set(G1CollectionSet* collection_set);
801 807
802 808 // The concurrent marker (and the thread it runs in.)
803 809 G1ConcurrentMark* _cm;
804 810 G1ConcurrentMarkThread* _cm_thread;
805 811
806 812 // The concurrent refiner.
807 813 G1ConcurrentRefine* _cr;
808 814
809 815 // The parallel task queues
810 816 RefToScanQueueSet *_task_queues;
811 817
812 818 // True iff an evacuation has failed in the current collection.
813 819 bool _evacuation_failed;
814 820
815 821 EvacuationFailedInfo* _evacuation_failed_info_array;
816 822
817 823 // Failed evacuations cause some logical from-space objects to have
818 824 // forwarding pointers to themselves. Reset them.
819 825 void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);
820 826
821 827 // Restore the objects in the regions in the collection set after an
822 828 // evacuation failure.
823 829 void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);
824 830
825 831 PreservedMarksSet _preserved_marks_set;
826 832
827 833 // Preserve the mark of "obj", if necessary, in preparation for its mark
828 834 // word being overwritten with a self-forwarding-pointer.
829 835 void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
830 836
831 837 #ifndef PRODUCT
832 838 // Support for forcing evacuation failures. Analogous to
833 839 // PromotionFailureALot for the other collectors.
834 840
835 841 // Records whether G1EvacuationFailureALot should be in effect
836 842 // for the current GC
837 843 bool _evacuation_failure_alot_for_current_gc;
838 844
839 845 // Used to record the GC number for interval checking when
840 846 // determining whether G1EvacuationFailureALot is in effect
841 847 // for the current GC.
842 848 size_t _evacuation_failure_alot_gc_number;
843 849
844 850 // Count of the number of evacuations between failures.
845 851 volatile size_t _evacuation_failure_alot_count;
846 852
847 853 // Set whether G1EvacuationFailureALot should be in effect
848 854 // for the current GC (based upon the type of GC and which
849 855 // command line flags are set).
850 856 inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
851 857 bool during_initial_mark,
852 858 bool mark_or_rebuild_in_progress);
853 859
854 860 inline void set_evacuation_failure_alot_for_current_gc();
855 861
856 862 // Return true if it's time to cause an evacuation failure.
857 863 inline bool evacuation_should_fail();
858 864
859 865 // Reset the G1EvacuationFailureALot counters. Should be called at
860 866 // the end of an evacuation pause in which an evacuation failure occurred.
861 867 inline void reset_evacuation_should_fail();
862 868 #endif // !PRODUCT
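The interval checking hinted at by the fields above might look like the following sketch; the names are hypothetical and the real check also considers the GC type and command line flags:

    #include <cstddef>

    // Force a failure once every 'interval' evacuations, then reset the counter,
    // mirroring the counter/reset pair declared above.
    static bool evacuation_should_fail_model(size_t* count_since_failure,
                                             size_t interval) {
      if (++(*count_since_failure) >= interval) {
        *count_since_failure = 0;  // reset at the pause that "failed"
        return true;
      }
      return false;
    }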
863 869
864 870 // ("Weak") Reference processing support.
865 871 //
866 872 // G1 has 2 instances of the reference processor class. One
867 873 // (_ref_processor_cm) handles reference object discovery
868 874 // and subsequent processing during concurrent marking cycles.
869 875 //
870 876 // The other (_ref_processor_stw) handles reference object
871 877 // discovery and processing during full GCs and incremental
872 878 // evacuation pauses.
873 879 //
874 880 // During an incremental pause, reference discovery will be
875 881 // temporarily disabled for _ref_processor_cm and will be
876 882 // enabled for _ref_processor_stw. At the end of the evacuation
877 883 // pause references discovered by _ref_processor_stw will be
878 884 // processed and discovery will be disabled. The previous
879 885 // setting for reference object discovery for _ref_processor_cm
880 886 // will be re-instated.
881 887 //
882 888 // At the start of marking:
883 889 // * Discovery by the CM ref processor is verified to be inactive
884 890 // and its discovered lists are empty.
885 891 // * Discovery by the CM ref processor is then enabled.
886 892 //
887 893 // At the end of marking:
888 894 // * Any references on the CM ref processor's discovered
889 895 // lists are processed (possibly MT).
890 896 //
891 897 // At the start of full GC we:
892 898 // * Disable discovery by the CM ref processor and
893 899 // empty CM ref processor's discovered lists
894 900 // (without processing any entries).
895 901 // * Verify that the STW ref processor is inactive and its
896 902 // discovered lists are empty.
897 903 // * Temporarily set STW ref processor discovery as single threaded.
898 904 // * Temporarily clear the STW ref processor's _is_alive_non_header
899 905 // field.
900 906 // * Finally enable discovery by the STW ref processor.
901 907 //
902 908 // The STW ref processor is used to record any discovered
903 909 // references during the full GC.
904 910 //
905 911 // At the end of a full GC we:
906 912 // * Enqueue any reference objects discovered by the STW ref processor
907 913 // that have non-live referents. This has the side-effect of
908 914 // making the STW ref processor inactive by disabling discovery.
909 915 // * Verify that the CM ref processor is still inactive
910 916 // and no references have been placed on its discovered
911 917 // lists (also checked as a precondition during initial marking).
912 918
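The discovery hand-off during an evacuation pause can be modeled with two flags; this is a sketch of the lifecycle described above, not the real ReferenceProcessor API:

    struct DiscoveryStateModel { bool cm_enabled; bool stw_enabled; };

    // Evacuation-pause hand-off as described above: pause CM discovery, enable
    // STW discovery, process, then re-instate the previous CM setting.
    static void evacuation_pause_model(DiscoveryStateModel* d) {
      bool cm_was_enabled = d->cm_enabled;
      d->cm_enabled  = false;           // temporarily disable CM discovery
      d->stw_enabled = true;            // enable STW discovery for the pause
      // ... evacuate; process references discovered by the STW processor ...
      d->stw_enabled = false;           // processing disables STW discovery
      d->cm_enabled  = cm_was_enabled;  // restore the previous CM setting
    }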
913 919 // The (stw) reference processor...
914 920 ReferenceProcessor* _ref_processor_stw;
915 921
916 922 // During reference object discovery, the _is_alive_non_header
917 923 // closure (if non-null) is applied to the referent object to
918 924 // determine whether the referent is live. If so then the
919 925 // reference object does not need to be 'discovered' and can
920 926 // be treated as a regular oop. This has the benefit of reducing
921 927 // the number of 'discovered' reference objects that need to
922 928 // be processed.
923 929 //
924 930 // Instance of the is_alive closure for embedding into the
925 931 // STW reference processor as the _is_alive_non_header field.
926 932 // Supplying a value for the _is_alive_non_header field is
927 933 // optional but doing so prevents unnecessary additions to
928 934 // the discovered lists during reference discovery.
929 935 G1STWIsAliveClosure _is_alive_closure_stw;
930 936
931 937 G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
932 938
933 939 // The (concurrent marking) reference processor...
934 940 ReferenceProcessor* _ref_processor_cm;
935 941
936 942 // Instance of the concurrent mark is_alive closure for embedding
937 943 // into the Concurrent Marking reference processor as the
938 944 // _is_alive_non_header field. Supplying a value for the
939 945 // _is_alive_non_header field is optional but doing so prevents
940 946 // unnecessary additions to the discovered lists during reference
941 947 // discovery.
942 948 G1CMIsAliveClosure _is_alive_closure_cm;
943 949
944 950 G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
945 951 public:
946 952
947 953 RefToScanQueue *task_queue(uint i) const;
948 954
949 955 uint num_task_queues() const;
950 956
951 957 // Create a G1CollectedHeap.
952 958 // Must call the initialize method afterwards.
953 959 // May not return if something goes wrong.
954 960 G1CollectedHeap();
955 961
956 962 private:
957 963 jint initialize_concurrent_refinement();
958 964 jint initialize_young_gen_sampling_thread();
959 965 public:
960 966 // Initialize the G1CollectedHeap to have the initial and
961 967 // maximum sizes and remembered and barrier sets
962 968 // specified by the policy object.
963 969 jint initialize();
964 970
965 971 virtual void stop();
966 972 virtual void safepoint_synchronize_begin();
967 973 virtual void safepoint_synchronize_end();
968 974
969 975 // Does operations required after initialization has been done.
970 976 void post_initialize();
971 977
972 978 // Initialize weak reference processing.
973 979 void ref_processing_init();
974 980
975 981 virtual Name kind() const {
976 982 return CollectedHeap::G1;
977 983 }
978 984
979 985 virtual const char* name() const {
980 986 return "G1";
981 987 }
982 988
983 989 const G1CollectorState* collector_state() const { return &_collector_state; }
984 990 G1CollectorState* collector_state() { return &_collector_state; }
985 991
992 + G1HeapSizingPolicy* heap_sizing_policy() const { return _heap_sizing_policy; }
986 993 // The current policy object for the collector.
987 994 G1Policy* policy() const { return _policy; }
988 995 // The remembered set.
989 996 G1RemSet* rem_set() const { return _rem_set; }
990 997
991 998 inline G1GCPhaseTimes* phase_times() const;
992 999
993 1000 HeapRegionManager* hrm() const { return _hrm; }
994 1001
995 1002 const G1CollectionSet* collection_set() const { return &_collection_set; }
996 1003 G1CollectionSet* collection_set() { return &_collection_set; }
997 1004
998 1005 virtual SoftRefPolicy* soft_ref_policy();
999 1006
1000 1007 virtual void initialize_serviceability();
1001 1008 virtual MemoryUsage memory_usage();
1002 1009 virtual GrowableArray<GCMemoryManager*> memory_managers();
1003 1010 virtual GrowableArray<MemoryPool*> memory_pools();
1004 1011
1005 1012 // Try to minimize the remembered set.
1006 1013 void scrub_rem_set();
1007 1014
1008 1015 // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1009 1016 void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
1010 1017
1011 1018 // The shared block offset table array.
1012 1019 G1BlockOffsetTable* bot() const { return _bot; }
1013 1020
1014 1021 // Reference Processing accessors
1015 1022
1016 1023 // The STW reference processor....
1017 1024 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1018 1025
1019 1026 G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1020 1027
1021 1028 // The Concurrent Marking reference processor...
1022 1029 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1023 1030
1024 1031 size_t unused_committed_regions_in_bytes() const;
1025 1032
1026 1033 virtual size_t capacity() const;
1027 1034 virtual size_t used() const;
1028 1035 // This should be called when we're not holding the heap lock. The
1029 1036 // result might be a bit inaccurate.
1030 1037 size_t used_unlocked() const;
1031 1038 size_t recalculate_used() const;
1032 1039
1033 1040 // These virtual functions do the actual allocation.
1034 1041 // Some heaps may offer a contiguous region for shared non-blocking
1035 1042 // allocation, via inlined code (by exporting the address of the top and
1036 1043 // end fields defining the extent of the contiguous allocation region.)
1037 1044 // But G1CollectedHeap doesn't yet support this.
1038 1045
1039 1046 virtual bool is_maximal_no_gc() const {
1040 1047 return _hrm->available() == 0;
1041 1048 }
1042 1049
1043 1050 // Returns whether there are any regions left in the heap for allocation.
1044 1051 bool has_regions_left_for_allocation() const {
1045 1052 return !is_maximal_no_gc() || num_free_regions() != 0;
1046 1053 }
1047 1054
1048 1055 // The current number of regions in the heap.
1049 1056 uint num_regions() const { return _hrm->length(); }
1050 1057
1051 1058 // The max number of regions in the heap.
1052 1059 uint max_regions() const { return _hrm->max_length(); }
1053 1060
1054 1061 // Max number of regions that can be committed.
1055 1062 uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
1056 1063
1057 1064 // The number of regions that are completely free.
1058 1065 uint num_free_regions() const { return _hrm->num_free_regions(); }
1059 1066
1060 1067 // The number of regions that can be allocated into.
1061 1068 uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1062 1069
1063 1070 MemoryUsage get_auxiliary_data_memory_usage() const {
1064 1071 return _hrm->get_auxiliary_data_memory_usage();
1065 1072 }
1066 1073
1067 1074 // The number of regions that are not completely free.
1068 1075 uint num_used_regions() const { return num_regions() - num_free_regions(); }
1069 1076
1070 1077 #ifdef ASSERT
1071 1078 bool is_on_master_free_list(HeapRegion* hr) {
1072 1079 return _hrm->is_free(hr);
1073 1080 }
1074 1081 #endif // ASSERT
1075 1082
1076 1083 inline void old_set_add(HeapRegion* hr);
1077 1084 inline void old_set_remove(HeapRegion* hr);
1078 1085
1079 1086 inline void archive_set_add(HeapRegion* hr);
1080 1087
1081 - size_t non_young_capacity_bytes() {
1088 + size_t non_young_capacity_bytes() const {
1082 1089 return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1083 1090 }
1084 1091
1085 1092 // Determine whether the given region is one that we are using as an
1086 1093 // old GC alloc region.
1087 1094 bool is_old_gc_alloc_region(HeapRegion* hr);
1088 1095
1089 1096 // Perform a collection of the heap; intended for use in implementing
1090 1097 // "System.gc". This probably implies as full a collection as the
1091 1098 // "CollectedHeap" supports.
1092 1099 virtual void collect(GCCause::Cause cause);
1093 1100
1094 1101 // Perform a collection of the heap with the given cause.
1095 1102 // Returns whether this collection actually executed.
1096 1103 bool try_collect(GCCause::Cause cause);
1097 1104
1098 1105 // True iff an evacuation has failed in the most-recent collection.
1099 1106 bool evacuation_failed() { return _evacuation_failed; }
1100 1107
1101 1108 void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1102 1109 void prepend_to_freelist(FreeRegionList* list);
1103 1110 void decrement_summary_bytes(size_t bytes);
1104 1111
1105 1112 virtual bool is_in(const void* p) const;
1106 1113 #ifdef ASSERT
1107 1114 // Returns whether p is in one of the available areas of the heap. Slow but
1108 1115 // extensive version.
1109 1116 bool is_in_exact(const void* p) const;
1110 1117 #endif
1111 1118
1112 1119 // Return "TRUE" iff the given object address is within the collection
1113 1120 // set. Assumes that the reference points into the heap.
1114 1121 inline bool is_in_cset(const HeapRegion *hr);
1115 1122 inline bool is_in_cset(oop obj);
1116 1123 inline bool is_in_cset(HeapWord* addr);
1117 1124
1118 1125 inline bool is_in_cset_or_humongous(const oop obj);
1119 1126
1120 1127 private:
1121 1128 // This array is used for a quick test on whether a reference points into
1122 1129 // the collection set or not. Each of the array's elements denotes whether the
1123 1130 // corresponding region is in the collection set or not.
1124 1131 G1HeapRegionAttrBiasedMappedArray _region_attr;
1125 1132
1126 1133 public:
1127 1134
1128 1135 inline G1HeapRegionAttr region_attr(const void* obj) const;
1129 1136 inline G1HeapRegionAttr region_attr(uint idx) const;
1130 1137
1131 1138 // Return "TRUE" iff the given object address is in the reserved
1132 1139 // region of g1.
1133 1140 bool is_in_g1_reserved(const void* p) const {
1134 1141 return _hrm->reserved().contains(p);
1135 1142 }
1136 1143
1137 1144 // Returns a MemRegion that corresponds to the space that has been
1138 1145 // reserved for the heap
1139 1146 MemRegion g1_reserved() const {
1140 1147 return _hrm->reserved();
1141 1148 }
1142 1149
1143 1150 MemRegion reserved_region() const {
1144 1151 return _reserved;
1145 1152 }
1146 1153
1147 1154 HeapWord* base() const {
1148 1155 return _reserved.start();
1149 1156 }
1150 1157
1151 1158 bool is_in_reserved(const void* addr) const {
1152 1159 return _reserved.contains(addr);
1153 1160 }
1154 1161
1155 1162 G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1156 1163
1157 1164 G1CardTable* card_table() const {
1158 1165 return _card_table;
1159 1166 }
1160 1167
1161 1168 // Iteration functions.
1162 1169
1163 1170 // Iterate over all objects, calling "cl.do_object" on each.
1164 1171 virtual void object_iterate(ObjectClosure* cl);
1165 1172
1166 1173 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1167 1174 virtual void keep_alive(oop obj);
1168 1175
1169 1176 // Iterate over heap regions, in address order, terminating the
1170 1177 // iteration early if the "do_heap_region" method returns "true".
1171 1178 void heap_region_iterate(HeapRegionClosure* blk) const;
1172 1179
1173 1180 // Return the region with the given index. It assumes the index is valid.
1174 1181 inline HeapRegion* region_at(uint index) const;
1175 1182 inline HeapRegion* region_at_or_null(uint index) const;
1176 1183
1177 1184 // Return the next region (by index) that is part of the same
1178 1185 // humongous object that hr is part of.
1179 1186 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1180 1187
1181 1188 // Calculate the region index of the given address. Given address must be
1182 1189 // within the heap.
1183 1190 inline uint addr_to_region(HeapWord* addr) const;
1184 1191
1185 1192 inline HeapWord* bottom_addr_for_region(uint index) const;
1186 1193
1187 1194 // Two functions to iterate over the heap regions in parallel. Threads
1188 1195 // compete using the HeapRegionClaimer to claim the regions before
1189 1196 // applying the closure on them.
1190 1197 // The _from_worker_offset version uses the HeapRegionClaimer and
1191 1198 // the worker id to calculate a start offset to prevent all workers from
1192 1199 // starting at the same point.
1193 1200 void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1194 1201 HeapRegionClaimer* hrclaimer,
1195 1202 uint worker_id) const;
1196 1203
1197 1204 void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1198 1205 HeapRegionClaimer* hrclaimer) const;
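The start-offset idea is simple proportional spreading. Illustrative math only; the actual claiming is done through the HeapRegionClaimer:

    #include <cstddef>
    #include <cstdio>

    // Evenly spread per-worker start regions so workers don't all begin
    // claiming at region 0.
    static unsigned start_region_for_worker(unsigned worker_id,
                                            unsigned num_workers,
                                            unsigned num_regions) {
      return (unsigned)((size_t)worker_id * num_regions / num_workers);
    }

    int main() {
      for (unsigned w = 0; w < 4; w++) {
        std::printf("worker %u starts at region %u of 100\n",
                    w, start_region_for_worker(w, 4, 100)); // 0, 25, 50, 75
      }
      return 0;
    }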
1199 1206
1200 1207 // Iterate over all regions in the collection set in parallel.
1201 1208 void collection_set_par_iterate_all(HeapRegionClosure* cl,
1202 1209 HeapRegionClaimer* hr_claimer,
1203 1210 uint worker_id);
1204 1211
1205 1212 // Iterate over all regions currently in the current collection set.
1206 1213 void collection_set_iterate_all(HeapRegionClosure* blk);
1207 1214
1208 1215 // Iterate over the regions in the current increment of the collection set.
1209 1216 // Starts the iteration so that the start regions of a given worker id over the
1210 1217 // set active_workers are evenly spread across the set of collection set regions
1211 1218 // to be iterated.
1212 1219 // The variant with the HeapRegionClaimer guarantees that the closure will be
1213 1220 // applied to a particular region exactly once.
1214 1221 void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1215 1222 collection_set_iterate_increment_from(blk, NULL, worker_id);
1216 1223 }
1217 1224 void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1218 1225
1219 1226 // Returns the HeapRegion that contains addr. addr must not be NULL.
1220 1227 template <class T>
1221 1228 inline HeapRegion* heap_region_containing(const T addr) const;
1222 1229
1223 1230 // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1224 1231 // region. addr must not be NULL.
1225 1232 template <class T>
1226 1233 inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1227 1234
1228 1235 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1229 1236 // each address in the (reserved) heap is a member of exactly
1230 1237 // one block. The defining characteristic of a block is that it is
1231 1238 // possible to find its size, and thus to progress forward to the next
1232 1239 // block. (Blocks may be of different sizes.) Thus, blocks may
1233 1240 // represent Java objects, or they might be free blocks in a
1234 1241 // free-list-based heap (or subheap), as long as the two kinds are
1235 1242 // distinguishable and the size of each is determinable.
1236 1243
1237 1244 // Returns the address of the start of the "block" that contains the
1238 1245 // address "addr". We say "block" instead of "object" since some heaps
1239 1246 // may not pack objects densely; a block may be either an object or a
1240 1247 // non-object.
1241 1248 HeapWord* block_start(const void* addr) const;
1242 1249
1243 1250 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1244 1251 // the block is an object.
1245 1252 bool block_is_obj(const HeapWord* addr) const;
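// Illustrative sketch (not part of this change) of the linear block walk
// these two functions enable; block_size() is assumed to exist for the
// covering region and is not part of this excerpt:
//
//   HeapWord* cur = bottom;            // start of the range being walked
//   while (cur < top) {
//     if (block_is_obj(cur)) {
//       // cur is the start of a live Java object; process oop(cur).
//     }
//     cur += block_size(cur);          // every block knows its own size
//   }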
1246 1253
1247 1254 // Section on thread-local allocation buffers (TLABs)
1248 1255 // See CollectedHeap for semantics.
1249 1256
1250 1257 bool supports_tlab_allocation() const;
1251 1258 size_t tlab_capacity(Thread* ignored) const;
1252 1259 size_t tlab_used(Thread* ignored) const;
1253 1260 size_t max_tlab_size() const;
1254 1261 size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1255 1262
1256 1263 inline bool is_in_young(const oop obj);
1257 1264
1258 1265 // Returns "true" iff the given word_size is "very large".
1259 1266 static bool is_humongous(size_t word_size) {
1260 1267 // Note this has to be strictly greater-than as the TLABs
1261 1268 // are capped at the humongous threshold and we want to
1262 1269 // ensure that we don't try to allocate a TLAB as
1263 1270 // humongous and that we don't allocate a humongous
1264 1271 // object in a TLAB.
1265 1272 return word_size > _humongous_object_threshold_in_words;
1266 1273 }
1267 1274
1268 1275 // Returns the humongous threshold for a specific region size
1269 1276 static size_t humongous_threshold_for(size_t region_size) {
1270 1277 return (region_size / 2);
1271 1278 }
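// Worked example (illustrative) assuming 8-byte heap words and a 1 MB
// region size, i.e. 131072 words per region:
//   humongous_threshold_for(131072) == 65536 words (512 KB)
//   is_humongous(65536) -> false  (at the threshold; still TLAB-able)
//   is_humongous(65537) -> true   (strictly greater than the threshold)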
180 lines elided
1272 1279
1273 1280 // Returns the number of regions a humongous object of the given word size
1274 1281 // requires.
1275 1282 static size_t humongous_obj_size_in_regions(size_t word_size);
1276 1283
1277 1284 // Return the maximum heap capacity.
1278 1285 virtual size_t max_capacity() const;
1279 1286
1280 1287 // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1281 1288 virtual size_t max_reserved_capacity() const;
1289 +
1290 + // Return the soft maximum heap capacity.
1291 + size_t soft_max_capacity() const;
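// A plausible sketch (not quoted from the accompanying .cpp change) of
// what soft_max_capacity() could return: the manageable SoftMaxHeapSize
// flag clamped to what the heap can actually honor.
//
//   size_t G1CollectedHeap::soft_max_capacity() const {
//     return clamp(align_up(SoftMaxHeapSize, HeapAlignment),
//                  MinHeapSize, max_capacity());
//   }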
1282 1292
1283 1293 virtual jlong millis_since_last_gc();
1284 1294
1285 1295
1286 1296 // Convenience function to be used in situations where the heap type can be
1287 1297 // asserted to be this type.
1288 1298 static G1CollectedHeap* heap();
1289 1299
1290 1300 void set_region_short_lived_locked(HeapRegion* hr);
1291 1301 // add appropriate methods for any other surv rate groups
1292 1302
1293 1303 const G1SurvivorRegions* survivor() const { return &_survivor; }
1294 1304
1295 1305 uint eden_regions_count() const { return _eden.length(); }
1296 1306 uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1297 1307 uint survivor_regions_count() const { return _survivor.length(); }
1298 1308 uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1299 1309 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1300 1310 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1301 1311 uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1302 1312 uint old_regions_count() const { return _old_set.length(); }
1303 1313 uint archive_regions_count() const { return _archive_set.length(); }
1304 1314 uint humongous_regions_count() const { return _humongous_set.length(); }
1305 1315
1306 1316 #ifdef ASSERT
1307 1317 bool check_young_list_empty();
1308 1318 #endif
1309 1319
1310 1320 // *** Stuff related to concurrent marking. It's not clear to me that so
1311 1321 // many of these need to be public.
1312 1322
1313 1323 // The functions below are helper functions that a subclass of
1314 1324 // "CollectedHeap" can use in the implementation of its virtual
1315 1325 // functions.
1316 1326 // This performs concurrent marking of the live objects,
1317 1327 // recording them in a bitmap off to the side.
1318 1328 void do_concurrent_mark();
1319 1329
1320 1330 bool is_marked_next(oop obj) const;
1321 1331
1322 1332 // Determine if an object is dead, given the object and also
1323 1333 // the region to which the object belongs. An object is dead
1324 1334 // iff a) it was not allocated since the last mark, b) it
1325 1335 // is not marked, and c) it is not in an archive region.
1326 1336 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1327 1337 return
1328 1338 hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
1329 1339 !hr->is_archive();
1330 1340 }
1331 1341
1332 1342 // This function returns true when an object has been
1333 1343 // around since the previous marking and hasn't yet
1334 1344 // been marked during this marking, and is not in an archive region.
1335 1345 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1336 1346 return
1337 1347 !hr->obj_allocated_since_next_marking(obj) &&
1338 1348 !is_marked_next(obj) &&
1339 1349 !hr->is_archive();
1340 1350 }
1341 1351
1342 1352 // Determine if an object is dead, given only the object itself.
1343 1353 // This will find the region to which the object belongs and
1344 1354 // then call the region version of the same function.
1345 1355
1346 1356 // Additionally, if the object is NULL it is not considered dead.
1347 1357
1348 1358 inline bool is_obj_dead(const oop obj) const;
1349 1359
1350 1360 inline bool is_obj_ill(const oop obj) const;
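// A plausible shape (illustrative) of the delegation described above;
// the NULL check realizes the "NULL is not dead" note:
//
//   inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
//     if (obj == NULL) {
//       return false;              // NULL is never considered dead
//     }
//     return is_obj_dead(obj, heap_region_containing(obj));
//   }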
1351 1361
1352 1362 inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1353 1363 inline bool is_obj_dead_full(const oop obj) const;
1354 1364
1355 1365 G1ConcurrentMark* concurrent_mark() const { return _cm; }
1356 1366
1357 1367 // Refinement
1358 1368
1359 1369 G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1360 1370
1361 1371 // Optimized nmethod scanning support routines
1362 1372
1363 1373 // Register the given nmethod with the G1 heap.
1364 1374 virtual void register_nmethod(nmethod* nm);
1365 1375
1366 1376 // Unregister the given nmethod from the G1 heap.
1367 1377 virtual void unregister_nmethod(nmethod* nm);
1368 1378
1369 1379 // No nmethod flushing needed.
1370 1380 virtual void flush_nmethod(nmethod* nm) {}
1371 1381
1372 1382 // No nmethod verification implemented.
1373 1383 virtual void verify_nmethod(nmethod* nm) {}
1374 1384
1375 1385 // Free up superfluous code root memory.
1376 1386 void purge_code_root_memory();
1377 1387
1378 1388 // Rebuild the strong code root lists for each region
1379 1389 // after a full GC.
1380 1390 void rebuild_strong_code_roots();
1381 1391
1382 1392 // Partial cleaning of VM internal data structures.
1383 1393 void string_dedup_cleaning(BoolObjectClosure* is_alive,
1384 1394 OopClosure* keep_alive,
1385 1395 G1GCPhaseTimes* phase_times = NULL);
1386 1396
1387 1397 // Performs cleaning of data structures after class unloading.
1388 1398 void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1389 1399
1390 1400 // Redirty logged cards in the refinement queue.
1391 1401 void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);
1392 1402
1393 1403 // Verification
1394 1404
1395 1405 // Deduplicate the given string.
1396 1406 virtual void deduplicate_string(oop str);
1397 1407
1398 1408 // Perform any cleanup actions necessary before allowing a verification.
1399 1409 virtual void prepare_for_verify();
1400 1410
1401 1411 // Perform verification.
1402 1412
1403 1413 // vo == UsePrevMarking -> use "prev" marking information,
1404 1414 // vo == UseNextMarking -> use "next" marking information
1405 1415 // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
1406 1416 //
1407 1417 // NOTE: Only the "prev" marking information is guaranteed to be
1408 1418 // consistent most of the time, so most calls to this should use
1409 1419 // vo == UsePrevMarking.
1410 1420 // Currently, there is only one case where this is called with
1411 1421 // vo == UseNextMarking, which is to verify the "next" marking
1412 1422 // information at the end of remark.
1413 1423 // Currently there is only one place where this is called with
1414 1424 // vo == UseFullMarking, which is to verify the marking during a
1415 1425 // full GC.
1416 1426 void verify(VerifyOption vo);
1417 1427
1418 1428 // WhiteBox testing support.
1419 1429 virtual bool supports_concurrent_phase_control() const;
1420 1430 virtual bool request_concurrent_phase(const char* phase);
1421 1431 bool is_heterogeneous_heap() const;
1422 1432
1423 1433 virtual WorkGang* get_safepoint_workers() { return _workers; }
1424 1434
1425 1435 // The methods below are here for convenience and dispatch the
1426 1436 // appropriate method depending on the value of the given VerifyOption
1427 1437 // parameter. The values for that parameter, and their meanings,
1428 1438 // are the same as those above.
1429 1439
1430 1440 bool is_obj_dead_cond(const oop obj,
1431 1441 const HeapRegion* hr,
1432 1442 const VerifyOption vo) const;
1433 1443
1434 1444 bool is_obj_dead_cond(const oop obj,
1435 1445 const VerifyOption vo) const;
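// Illustrative sketch of the VerifyOption dispatch described above; the
// enumerator names are assumptions, not quoted from this change:
//
//   bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
//                                          const HeapRegion* hr,
//                                          const VerifyOption vo) const {
//     switch (vo) {
//     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
//     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
//     case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
//     default:                            ShouldNotReachHere();
//     }
//     return false; // unreachable; placates the compiler
//   }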
1436 1446
1437 1447 G1HeapSummary create_g1_heap_summary();
1438 1448 G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1439 1449
1440 1450 // Printing
1441 1451 private:
1442 1452 void print_heap_regions() const;
1443 1453 void print_regions_on(outputStream* st) const;
1444 1454
1445 1455 public:
1446 1456 virtual void print_on(outputStream* st) const;
1447 1457 virtual void print_extended_on(outputStream* st) const;
1448 1458 virtual void print_on_error(outputStream* st) const;
1449 1459
1450 1460 virtual void print_gc_threads_on(outputStream* st) const;
1451 1461 virtual void gc_threads_do(ThreadClosure* tc) const;
1452 1462
1453 1463 // Override
1454 1464 void print_tracing_info() const;
1455 1465
1456 1466 // The following two methods are helpful for debugging RSet issues.
1457 1467 void print_cset_rsets() PRODUCT_RETURN;
1458 1468 void print_all_rsets() PRODUCT_RETURN;
1459 1469
1460 1470 // Used to print information about locations in the hs_err file.
1461 1471 virtual bool print_location(outputStream* st, void* addr) const;
1462 1472
1463 1473 size_t pending_card_num();
1464 1474 };
1465 1475
1466 1476 class G1ParEvacuateFollowersClosure : public VoidClosure {
1467 1477 private:
1468 1478 double _start_term;
1469 1479 double _term_time;
1470 1480 size_t _term_attempts;
1471 1481
1472 1482 void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1473 1483 void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1474 1484 protected:
1475 1485 G1CollectedHeap* _g1h;
1476 1486 G1ParScanThreadState* _par_scan_state;
1477 1487 RefToScanQueueSet* _queues;
1478 1488 ParallelTaskTerminator* _terminator;
1479 1489 G1GCPhaseTimes::GCParPhases _phase;
1480 1490
1481 1491 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
1482 1492 RefToScanQueueSet* queues() { return _queues; }
1483 1493 ParallelTaskTerminator* terminator() { return _terminator; }
1484 1494
1485 1495 public:
1486 1496 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1487 1497 G1ParScanThreadState* par_scan_state,
1488 1498 RefToScanQueueSet* queues,
1489 1499 ParallelTaskTerminator* terminator,
1490 1500 G1GCPhaseTimes::GCParPhases phase)
1491 1501 : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1492 1502 _g1h(g1h), _par_scan_state(par_scan_state),
1493 1503 _queues(queues), _terminator(terminator), _phase(phase) {}
1494 1504
1495 1505 void do_void();
1496 1506
1497 1507 double term_time() const { return _term_time; }
1498 1508 size_t term_attempts() const { return _term_attempts; }
1499 1509
1500 1510 private:
1501 1511 inline bool offer_termination();
1502 1512 };
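// A sketch (illustrative assumption) of how offer_termination() above may
// bracket the terminator call with the term-time accounting helpers:
//
//   inline bool G1ParEvacuateFollowersClosure::offer_termination() {
//     start_term_time();                       // counts one attempt
//     const bool res = terminator()->offer_termination();
//     end_term_time();                         // accumulates wait time
//     return res;
//   }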
1503 1513
1504 1514 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP
213 lines elided