Abort concurrent mark
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoaderDataGraph.hpp"
27 27 #include "classfile/metadataOnStackMark.hpp"
28 28 #include "classfile/stringTable.hpp"
29 29 #include "code/codeCache.hpp"
30 30 #include "code/icBuffer.hpp"
31 31 #include "gc/g1/g1Allocator.inline.hpp"
32 32 #include "gc/g1/g1Arguments.hpp"
33 33 #include "gc/g1/g1BarrierSet.hpp"
34 34 #include "gc/g1/g1CardTableEntryClosure.hpp"
35 35 #include "gc/g1/g1CollectedHeap.inline.hpp"
36 36 #include "gc/g1/g1CollectionSet.hpp"
37 37 #include "gc/g1/g1CollectorState.hpp"
38 38 #include "gc/g1/g1ConcurrentRefine.hpp"
39 39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
40 40 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
41 41 #include "gc/g1/g1DirtyCardQueue.hpp"
42 42 #include "gc/g1/g1EvacStats.inline.hpp"
43 43 #include "gc/g1/g1FullCollector.hpp"
44 44 #include "gc/g1/g1GCPhaseTimes.hpp"
45 45 #include "gc/g1/g1HeapSizingPolicy.hpp"
46 46 #include "gc/g1/g1HeapTransition.hpp"
47 47 #include "gc/g1/g1HeapVerifier.hpp"
48 48 #include "gc/g1/g1HotCardCache.hpp"
49 49 #include "gc/g1/g1MemoryPool.hpp"
50 50 #include "gc/g1/g1OopClosures.inline.hpp"
51 51 #include "gc/g1/g1ParallelCleaning.hpp"
52 52 #include "gc/g1/g1ParScanThreadState.inline.hpp"
53 53 #include "gc/g1/g1Policy.hpp"
54 54 #include "gc/g1/g1RedirtyCardsQueue.hpp"
55 55 #include "gc/g1/g1RegionToSpaceMapper.hpp"
56 56 #include "gc/g1/g1RemSet.hpp"
57 57 #include "gc/g1/g1RootClosures.hpp"
58 58 #include "gc/g1/g1RootProcessor.hpp"
59 59 #include "gc/g1/g1SATBMarkQueueSet.hpp"
60 60 #include "gc/g1/g1StringDedup.hpp"
61 61 #include "gc/g1/g1ThreadLocalData.hpp"
62 62 #include "gc/g1/g1Trace.hpp"
63 63 #include "gc/g1/g1YCTypes.hpp"
64 64 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
65 65 #include "gc/g1/g1VMOperations.hpp"
66 66 #include "gc/g1/heapRegion.inline.hpp"
67 67 #include "gc/g1/heapRegionRemSet.hpp"
68 68 #include "gc/g1/heapRegionSet.inline.hpp"
69 69 #include "gc/shared/concurrentGCBreakpoints.hpp"
70 70 #include "gc/shared/gcBehaviours.hpp"
71 71 #include "gc/shared/gcHeapSummary.hpp"
72 72 #include "gc/shared/gcId.hpp"
73 73 #include "gc/shared/gcLocker.hpp"
74 74 #include "gc/shared/gcTimer.hpp"
75 75 #include "gc/shared/gcTraceTime.inline.hpp"
76 76 #include "gc/shared/generationSpec.hpp"
77 77 #include "gc/shared/isGCActiveMark.hpp"
78 78 #include "gc/shared/locationPrinter.inline.hpp"
79 79 #include "gc/shared/oopStorageParState.hpp"
80 80 #include "gc/shared/preservedMarks.inline.hpp"
81 81 #include "gc/shared/suspendibleThreadSet.hpp"
82 82 #include "gc/shared/referenceProcessor.inline.hpp"
83 83 #include "gc/shared/taskTerminator.hpp"
84 84 #include "gc/shared/taskqueue.inline.hpp"
85 85 #include "gc/shared/weakProcessor.inline.hpp"
86 86 #include "gc/shared/workerPolicy.hpp"
87 87 #include "logging/log.hpp"
88 88 #include "memory/allocation.hpp"
89 89 #include "memory/iterator.hpp"
90 90 #include "memory/resourceArea.hpp"
91 91 #include "memory/universe.hpp"
92 92 #include "oops/access.inline.hpp"
93 93 #include "oops/compressedOops.inline.hpp"
94 94 #include "oops/oop.inline.hpp"
95 95 #include "runtime/atomic.hpp"
96 96 #include "runtime/flags/flagSetting.hpp"
97 97 #include "runtime/handles.inline.hpp"
98 98 #include "runtime/init.hpp"
99 99 #include "runtime/orderAccess.hpp"
100 100 #include "runtime/threadSMR.hpp"
101 101 #include "runtime/vmThread.hpp"
102 102 #include "utilities/align.hpp"
103 103 #include "utilities/bitMap.inline.hpp"
104 104 #include "utilities/globalDefinitions.hpp"
105 105 #include "utilities/stack.inline.hpp"
106 106
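// Note: objects larger than half a heap region (HeapRegion::GrainWords / 2)
// are humongous; this threshold is computed once during heap initialization.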
107 107 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
108 108
109 109 // INVARIANTS/NOTES
110 110 //
111 111 // All allocation activity covered by the G1CollectedHeap interface is
112 112 // serialized by acquiring the HeapLock. This happens in mem_allocate
113 113 // and allocate_new_tlab, which are the "entry" points to the
114 114 // allocation code from the rest of the JVM. (Note that this does not
115 115 // apply to TLAB allocation, which is not part of this interface: it
116 116 // is done by clients of this interface.)
117 117
118 118 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
119 119 private:
120 120 size_t _num_dirtied;
121 121 G1CollectedHeap* _g1h;
122 122 G1CardTable* _g1_ct;
123 123
124 124 HeapRegion* region_for_card(CardValue* card_ptr) const {
125 125 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
126 126 }
127 127
128 128 bool will_become_free(HeapRegion* hr) const {
129 129 // A region will be freed by free_collection_set if the region is in the
130 130 // collection set and has not had an evacuation failure.
131 131 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
132 132 }
133 133
134 134 public:
135 135 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
136 136 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
137 137
138 138 void do_card_ptr(CardValue* card_ptr, uint worker_id) {
139 139 HeapRegion* hr = region_for_card(card_ptr);
140 140
141 141 // Should only dirty cards in regions that won't be freed.
142 142 if (!will_become_free(hr)) {
143 143 *card_ptr = G1CardTable::dirty_card_val();
144 144 _num_dirtied++;
145 145 }
146 146 }
147 147
148 148 size_t num_dirtied() const { return _num_dirtied; }
149 149 };
150 150
151 151
152 152 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
153 153 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
154 154 }
155 155
156 156 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
157 157 // The from card cache is not the memory that is actually committed. So we cannot
158 158 // take advantage of the zero_filled parameter.
159 159 reset_from_card_cache(start_idx, num_regions);
160 160 }
161 161
162 162 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
163 163 Ticks start = Ticks::now();
164 164 workers()->run_task(task, workers()->active_workers());
165 165 return Ticks::now() - start;
166 166 }
167 167
168 168 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
169 169 MemRegion mr) {
170 170 return new HeapRegion(hrs_index, bot(), mr);
171 171 }
172 172
173 173 // Private methods.
174 174
175 175 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
176 176 HeapRegionType type,
177 177 bool do_expand,
178 178 uint node_index) {
179 179 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
180 180 "the only time we use this to allocate a humongous region is "
181 181 "when we are allocating a single humongous region");
182 182
183 183 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
184 184
185 185 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
186 186 // Currently, only attempts to allocate GC alloc regions set
187 187 // do_expand to true. So, we should only reach here during a
188 188 // safepoint. If this assumption changes we might have to
189 189 // reconsider the use of _expand_heap_after_alloc_failure.
190 190 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
191 191
192 192 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
193 193 word_size * HeapWordSize);
194 194
195 195 assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
196 196 "This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
197 197 word_size * HeapWordSize);
198 198 if (expand_single_region(node_index)) {
199 199 // Given that expand_single_region() succeeded in expanding the heap, and we
200 200 // always expand the heap by an amount aligned to the heap
201 201 // region size, the free list should in theory not be empty.
202 202       // Even so, allocate_free_region() will check for NULL.
203 203 res = _hrm->allocate_free_region(type, node_index);
204 204 } else {
205 205 _expand_heap_after_alloc_failure = false;
206 206 }
207 207 }
208 208 return res;
209 209 }
210 210
211 211 HeapWord*
212 212 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
213 213 uint num_regions,
214 214 size_t word_size) {
215 215 assert(first != G1_NO_HRM_INDEX, "pre-condition");
216 216 assert(is_humongous(word_size), "word_size should be humongous");
217 217 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
218 218
219 219 // Index of last region in the series.
220 220 uint last = first + num_regions - 1;
221 221
222 222 // We need to initialize the region(s) we just discovered. This is
223 223 // a bit tricky given that it can happen concurrently with
224 224 // refinement threads refining cards on these regions and
225 225 // potentially wanting to refine the BOT as they are scanning
226 226 // those cards (this can happen shortly after a cleanup; see CR
227 227 // 6991377). So we have to set up the region(s) carefully and in
228 228 // a specific order.
229 229
230 230 // The word size sum of all the regions we will allocate.
231 231 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
232 232 assert(word_size <= word_size_sum, "sanity");
233 233
234 234 // This will be the "starts humongous" region.
235 235 HeapRegion* first_hr = region_at(first);
236 236 // The header of the new object will be placed at the bottom of
237 237 // the first region.
238 238 HeapWord* new_obj = first_hr->bottom();
239 239 // This will be the new top of the new object.
240 240 HeapWord* obj_top = new_obj + word_size;
241 241
242 242 // First, we need to zero the header of the space that we will be
243 243 // allocating. When we update top further down, some refinement
244 244 // threads might try to scan the region. By zeroing the header we
245 245 // ensure that any thread that will try to scan the region will
246 246 // come across the zero klass word and bail out.
247 247 //
248 248 // NOTE: It would not have been correct to have used
249 249 // CollectedHeap::fill_with_object() and make the space look like
250 250 // an int array. The thread that is doing the allocation will
251 251 // later update the object header to a potentially different array
252 252 // type and, for a very short period of time, the klass and length
253 253 // fields will be inconsistent. This could cause a refinement
254 254 // thread to calculate the object size incorrectly.
255 255 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
256 256
257 257 // Next, pad out the unused tail of the last region with filler
258 258 // objects, for improved usage accounting.
259 259 // How many words we use for filler objects.
260 260 size_t word_fill_size = word_size_sum - word_size;
261 261
262 262   // How many words of memory we "waste" that cannot hold a filler object.
263 263 size_t words_not_fillable = 0;
264 264
265 265 if (word_fill_size >= min_fill_size()) {
266 266 fill_with_objects(obj_top, word_fill_size);
267 267 } else if (word_fill_size > 0) {
268 268 // We have space to fill, but we cannot fit an object there.
269 269 words_not_fillable = word_fill_size;
270 270 word_fill_size = 0;
271 271 }
272 272
273 273 // We will set up the first region as "starts humongous". This
274 274 // will also update the BOT covering all the regions to reflect
275 275 // that there is a single object that starts at the bottom of the
276 276 // first region.
277 277 first_hr->set_starts_humongous(obj_top, word_fill_size);
278 278 _policy->remset_tracker()->update_at_allocate(first_hr);
279 279 // Then, if there are any, we will set up the "continues
280 280 // humongous" regions.
281 281 HeapRegion* hr = NULL;
282 282 for (uint i = first + 1; i <= last; ++i) {
283 283 hr = region_at(i);
284 284 hr->set_continues_humongous(first_hr);
285 285 _policy->remset_tracker()->update_at_allocate(hr);
286 286 }
287 287
288 288 // Up to this point no concurrent thread would have been able to
289 289 // do any scanning on any region in this series. All the top
290 290 // fields still point to bottom, so the intersection between
291 291 // [bottom,top] and [card_start,card_end] will be empty. Before we
292 292 // update the top fields, we'll do a storestore to make sure that
293 293 // no thread sees the update to top before the zeroing of the
294 294 // object header and the BOT initialization.
295 295 OrderAccess::storestore();
296 296
297 297 // Now, we will update the top fields of the "continues humongous"
298 298 // regions except the last one.
299 299 for (uint i = first; i < last; ++i) {
300 300 hr = region_at(i);
301 301 hr->set_top(hr->end());
302 302 }
303 303
304 304 hr = region_at(last);
305 305 // If we cannot fit a filler object, we must set top to the end
306 306 // of the humongous object, otherwise we cannot iterate the heap
307 307 // and the BOT will not be complete.
308 308 hr->set_top(hr->end() - words_not_fillable);
309 309
310 310 assert(hr->bottom() < obj_top && obj_top <= hr->end(),
311 311 "obj_top should be in last region");
312 312
313 313 _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
314 314
315 315 assert(words_not_fillable == 0 ||
316 316 first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
317 317 "Miscalculation in humongous allocation");
318 318
319 319 increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
320 320
321 321 for (uint i = first; i <= last; ++i) {
322 322 hr = region_at(i);
323 323 _humongous_set.add(hr);
324 324 _hr_printer.alloc(hr);
325 325 }
326 326
327 327 return new_obj;
328 328 }
329 329
330 330 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
331 331 assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
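  // For example, with 1M regions (GrainWords == 128K words), a 200000-word
  // object aligns up to 256K words and so occupies two regions.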
332 332 return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
333 333 }
334 334
335 335 // If the object could fit into free regions without expansion, try that.
336 336 // Otherwise, if we can expand the heap, do so.
337 337 // Otherwise, if using expansion regions might help, try with those given back.
338 338 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
339 339 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
340 340
341 341 _verifier->verify_region_sets_optional();
342 342
343 343 uint first = G1_NO_HRM_INDEX;
344 344 uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
345 345
346 346 if (obj_regions == 1) {
347 347     // Only one region to allocate; try to use a fast path by directly allocating
348 348     // from the free lists. Do not try to expand here; we will potentially do that
349 349     // later.
350 350 HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
351 351 if (hr != NULL) {
352 352 first = hr->hrm_index();
353 353 }
354 354 } else {
355 355     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
356 356     // are lucky enough to find some.
357 357 first = _hrm->find_contiguous_only_empty(obj_regions);
358 358 if (first != G1_NO_HRM_INDEX) {
359 359 _hrm->allocate_free_regions_starting_at(first, obj_regions);
360 360 }
361 361 }
362 362
363 363 if (first == G1_NO_HRM_INDEX) {
364 364     // Policy: We could not find enough regions for the humongous object in the
365 365     // free list. Look through the heap for a mix of free and uncommitted regions.
366 366     // If we find such a mix, try expansion.
367 367 first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
368 368 if (first != G1_NO_HRM_INDEX) {
369 369 // We found something. Make sure these regions are committed, i.e. expand
370 370 // the heap. Alternatively we could do a defragmentation GC.
371 371 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
372 372 word_size * HeapWordSize);
373 373
374 374 _hrm->expand_at(first, obj_regions, workers());
375 375 policy()->record_new_heap_size(num_regions());
376 376
377 377 #ifdef ASSERT
378 378 for (uint i = first; i < first + obj_regions; ++i) {
379 379 HeapRegion* hr = region_at(i);
380 380 assert(hr->is_free(), "sanity");
381 381 assert(hr->is_empty(), "sanity");
382 382 assert(is_on_master_free_list(hr), "sanity");
383 383 }
384 384 #endif
385 385 _hrm->allocate_free_regions_starting_at(first, obj_regions);
386 386 } else {
387 387 // Policy: Potentially trigger a defragmentation GC.
388 388 }
389 389 }
390 390
391 391 HeapWord* result = NULL;
392 392 if (first != G1_NO_HRM_INDEX) {
393 393 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
394 394 assert(result != NULL, "it should always return a valid result");
395 395
396 396 // A successful humongous object allocation changes the used space
397 397 // information of the old generation so we need to recalculate the
398 398 // sizes and update the jstat counters here.
399 399 g1mm()->update_sizes();
400 400 }
401 401
402 402 _verifier->verify_region_sets_optional();
403 403
404 404 return result;
405 405 }
406 406
407 407 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
408 408 size_t requested_size,
409 409 size_t* actual_size) {
410 410 assert_heap_not_locked_and_not_at_safepoint();
411 411 assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
412 412
413 413 return attempt_allocation(min_size, requested_size, actual_size);
414 414 }
415 415
416 416 HeapWord*
417 417 G1CollectedHeap::mem_allocate(size_t word_size,
418 418 bool* gc_overhead_limit_was_exceeded) {
419 419 assert_heap_not_locked_and_not_at_safepoint();
420 420
421 421 if (is_humongous(word_size)) {
422 422 return attempt_allocation_humongous(word_size);
423 423 }
424 424 size_t dummy = 0;
425 425 return attempt_allocation(word_size, word_size, &dummy);
426 426 }
427 427
428 428 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
429 429 ResourceMark rm; // For retrieving the thread names in log messages.
430 430
431 431 // Make sure you read the note in attempt_allocation_humongous().
432 432
433 433 assert_heap_not_locked_and_not_at_safepoint();
434 434 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
435 435 "be called for humongous allocation requests");
436 436
437 437 // We should only get here after the first-level allocation attempt
438 438 // (attempt_allocation()) failed to allocate.
439 439
440 440 // We will loop until a) we manage to successfully perform the
441 441 // allocation or b) we successfully schedule a collection which
442 442 // fails to perform the allocation. b) is the only case when we'll
443 443 // return NULL.
444 444 HeapWord* result = NULL;
445 445 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
446 446 bool should_try_gc;
447 447 uint gc_count_before;
448 448
449 449 {
450 450 MutexLocker x(Heap_lock);
451 451 result = _allocator->attempt_allocation_locked(word_size);
452 452 if (result != NULL) {
453 453 return result;
454 454 }
455 455
456 456       // If the GCLocker is active and we are bound for a GC, try expanding the young gen.
457 457       // This is different to when only GCLocker::needs_gc() is set: we try to avoid
458 458       // waiting too long while the GCLocker is active.
459 459 if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
460 460 // No need for an ergo message here, can_expand_young_list() does this when
461 461 // it returns true.
462 462 result = _allocator->attempt_allocation_force(word_size);
463 463 if (result != NULL) {
464 464 return result;
465 465 }
466 466 }
467 467       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
468 468       // the GCLocker-initiated GC has been performed and then retry. This includes
469 469       // the case when the GCLocker is no longer active but the GC has not yet been performed.
470 470 should_try_gc = !GCLocker::needs_gc();
471 471 // Read the GC count while still holding the Heap_lock.
472 472 gc_count_before = total_collections();
473 473 }
474 474
475 475 if (should_try_gc) {
476 476 bool succeeded;
477 477 result = do_collection_pause(word_size, gc_count_before, &succeeded,
478 478 GCCause::_g1_inc_collection_pause);
479 479 if (result != NULL) {
480 480 assert(succeeded, "only way to get back a non-NULL result");
481 481 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
482 482 Thread::current()->name(), p2i(result));
483 483 return result;
484 484 }
485 485
486 486 if (succeeded) {
487 487 // We successfully scheduled a collection which failed to allocate. No
488 488 // point in trying to allocate further. We'll just return NULL.
489 489 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
490 490 SIZE_FORMAT " words", Thread::current()->name(), word_size);
491 491 return NULL;
492 492 }
493 493 log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
494 494 Thread::current()->name(), word_size);
495 495 } else {
496 496 // Failed to schedule a collection.
497 497 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
498 498 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
499 499 SIZE_FORMAT " words", Thread::current()->name(), word_size);
500 500 return NULL;
501 501 }
502 502 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
503 503 // The GCLocker is either active or the GCLocker initiated
504 504 // GC has not yet been performed. Stall until it is and
505 505 // then retry the allocation.
506 506 GCLocker::stall_until_clear();
507 507 gclocker_retry_count += 1;
508 508 }
509 509
510 510 // We can reach here if we were unsuccessful in scheduling a
511 511 // collection (because another thread beat us to it) or if we were
512 512     // stalled due to the GC locker. In either case we should retry the
513 513 // allocation attempt in case another thread successfully
514 514 // performed a collection and reclaimed enough space. We do the
515 515 // first attempt (without holding the Heap_lock) here and the
516 516 // follow-on attempt will be at the start of the next loop
517 517 // iteration (after taking the Heap_lock).
518 518 size_t dummy = 0;
519 519 result = _allocator->attempt_allocation(word_size, word_size, &dummy);
520 520 if (result != NULL) {
521 521 return result;
522 522 }
523 523
524 524 // Give a warning if we seem to be looping forever.
525 525 if ((QueuedAllocationWarningCount > 0) &&
526 526 (try_count % QueuedAllocationWarningCount == 0)) {
527 527 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
528 528 Thread::current()->name(), try_count, word_size);
529 529 }
530 530 }
531 531
532 532 ShouldNotReachHere();
533 533 return NULL;
534 534 }
535 535
536 536 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
537 537 assert_at_safepoint_on_vm_thread();
538 538 if (_archive_allocator == NULL) {
539 539 _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
540 540 }
541 541 }
542 542
543 543 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
544 544 // Allocations in archive regions cannot be of a size that would be considered
545 545 // humongous even for a minimum-sized region, because G1 region sizes/boundaries
546 546 // may be different at archive-restore time.
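  // For example, with a 1M minimum region size (128K words), any request of
  // 64K words (512KB) or more is treated as too large here.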
547 547 return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
548 548 }
549 549
550 550 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
551 551 assert_at_safepoint_on_vm_thread();
552 552 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
553 553 if (is_archive_alloc_too_large(word_size)) {
554 554 return NULL;
555 555 }
556 556 return _archive_allocator->archive_mem_allocate(word_size);
557 557 }
558 558
559 559 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
560 560 size_t end_alignment_in_bytes) {
561 561 assert_at_safepoint_on_vm_thread();
562 562 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
563 563
564 564 // Call complete_archive to do the real work, filling in the MemRegion
565 565 // array with the archive regions.
566 566 _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
567 567 delete _archive_allocator;
568 568 _archive_allocator = NULL;
569 569 }
570 570
571 571 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
572 572 assert(ranges != NULL, "MemRegion array NULL");
573 573 assert(count != 0, "No MemRegions provided");
574 574 MemRegion reserved = _hrm->reserved();
575 575 for (size_t i = 0; i < count; i++) {
576 576 if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
577 577 return false;
578 578 }
579 579 }
580 580 return true;
581 581 }
582 582
583 583 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
584 584 size_t count,
585 585 bool open) {
586 586 assert(!is_init_completed(), "Expect to be called at JVM init time");
587 587 assert(ranges != NULL, "MemRegion array NULL");
588 588 assert(count != 0, "No MemRegions provided");
589 589 MutexLocker x(Heap_lock);
590 590
591 591 MemRegion reserved = _hrm->reserved();
592 592 HeapWord* prev_last_addr = NULL;
593 593 HeapRegion* prev_last_region = NULL;
594 594
595 595 // Temporarily disable pretouching of heap pages. This interface is used
596 596 // when mmap'ing archived heap data in, so pre-touching is wasted.
597 597 FlagSetting fs(AlwaysPreTouch, false);
598 598
599 599 // Enable archive object checking used by G1MarkSweep. We have to let it know
600 600 // about each archive range, so that objects in those ranges aren't marked.
601 601 G1ArchiveAllocator::enable_archive_object_check();
602 602
603 603 // For each specified MemRegion range, allocate the corresponding G1
604 604 // regions and mark them as archive regions. We expect the ranges
605 605 // in ascending starting address order, without overlap.
606 606 for (size_t i = 0; i < count; i++) {
607 607 MemRegion curr_range = ranges[i];
608 608 HeapWord* start_address = curr_range.start();
609 609 size_t word_size = curr_range.word_size();
610 610 HeapWord* last_address = curr_range.last();
611 611 size_t commits = 0;
612 612
613 613 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
614 614 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
615 615 p2i(start_address), p2i(last_address));
616 616 guarantee(start_address > prev_last_addr,
617 617 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
618 618 p2i(start_address), p2i(prev_last_addr));
619 619 prev_last_addr = last_address;
620 620
621 621 // Check for ranges that start in the same G1 region in which the previous
622 622 // range ended, and adjust the start address so we don't try to allocate
623 623 // the same region again. If the current range is entirely within that
624 624 // region, skip it, just adjusting the recorded top.
625 625 HeapRegion* start_region = _hrm->addr_to_region(start_address);
626 626 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
627 627 start_address = start_region->end();
628 628 if (start_address > last_address) {
629 629 increase_used(word_size * HeapWordSize);
630 630 start_region->set_top(last_address + 1);
631 631 continue;
632 632 }
633 633 start_region->set_top(start_address);
634 634 curr_range = MemRegion(start_address, last_address + 1);
635 635 start_region = _hrm->addr_to_region(start_address);
636 636 }
637 637
638 638 // Perform the actual region allocation, exiting if it fails.
639 639 // Then note how much new space we have allocated.
640 640 if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
641 641 return false;
642 642 }
643 643 increase_used(word_size * HeapWordSize);
644 644 if (commits != 0) {
645 645 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
646 646 HeapRegion::GrainWords * HeapWordSize * commits);
647 647
648 648 }
649 649
650 650 // Mark each G1 region touched by the range as archive, add it to
651 651 // the old set, and set top.
652 652 HeapRegion* curr_region = _hrm->addr_to_region(start_address);
653 653 HeapRegion* last_region = _hrm->addr_to_region(last_address);
654 654 prev_last_region = last_region;
655 655
656 656 while (curr_region != NULL) {
657 657 assert(curr_region->is_empty() && !curr_region->is_pinned(),
658 658 "Region already in use (index %u)", curr_region->hrm_index());
659 659 if (open) {
660 660 curr_region->set_open_archive();
661 661 } else {
662 662 curr_region->set_closed_archive();
663 663 }
664 664 _hr_printer.alloc(curr_region);
665 665 _archive_set.add(curr_region);
666 666 HeapWord* top;
667 667 HeapRegion* next_region;
668 668 if (curr_region != last_region) {
669 669 top = curr_region->end();
670 670 next_region = _hrm->next_region_in_heap(curr_region);
671 671 } else {
672 672 top = last_address + 1;
673 673 next_region = NULL;
674 674 }
675 675 curr_region->set_top(top);
676 676 curr_region = next_region;
677 677 }
678 678
679 679 // Notify mark-sweep of the archive
680 680 G1ArchiveAllocator::set_range_archive(curr_range, open);
681 681 }
682 682 return true;
683 683 }
684 684
685 685 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
686 686 assert(!is_init_completed(), "Expect to be called at JVM init time");
687 687 assert(ranges != NULL, "MemRegion array NULL");
688 688 assert(count != 0, "No MemRegions provided");
689 689 MemRegion reserved = _hrm->reserved();
690 690 HeapWord *prev_last_addr = NULL;
691 691 HeapRegion* prev_last_region = NULL;
692 692
693 693 // For each MemRegion, create filler objects, if needed, in the G1 regions
694 694 // that contain the address range. The address range actually within the
695 695 // MemRegion will not be modified. That is assumed to have been initialized
696 696 // elsewhere, probably via an mmap of archived heap data.
697 697 MutexLocker x(Heap_lock);
698 698 for (size_t i = 0; i < count; i++) {
699 699 HeapWord* start_address = ranges[i].start();
700 700 HeapWord* last_address = ranges[i].last();
701 701
702 702 assert(reserved.contains(start_address) && reserved.contains(last_address),
703 703 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
704 704 p2i(start_address), p2i(last_address));
705 705 assert(start_address > prev_last_addr,
706 706 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
707 707 p2i(start_address), p2i(prev_last_addr));
708 708
709 709 HeapRegion* start_region = _hrm->addr_to_region(start_address);
710 710 HeapRegion* last_region = _hrm->addr_to_region(last_address);
711 711 HeapWord* bottom_address = start_region->bottom();
712 712
713 713 // Check for a range beginning in the same region in which the
714 714 // previous one ended.
715 715 if (start_region == prev_last_region) {
716 716 bottom_address = prev_last_addr + 1;
717 717 }
718 718
719 719 // Verify that the regions were all marked as archive regions by
720 720 // alloc_archive_regions.
721 721 HeapRegion* curr_region = start_region;
722 722 while (curr_region != NULL) {
723 723 guarantee(curr_region->is_archive(),
724 724 "Expected archive region at index %u", curr_region->hrm_index());
725 725 if (curr_region != last_region) {
726 726 curr_region = _hrm->next_region_in_heap(curr_region);
727 727 } else {
728 728 curr_region = NULL;
729 729 }
730 730 }
731 731
732 732 prev_last_addr = last_address;
733 733 prev_last_region = last_region;
734 734
735 735 // Fill the memory below the allocated range with dummy object(s),
736 736 // if the region bottom does not match the range start, or if the previous
737 737 // range ended within the same G1 region, and there is a gap.
738 738 if (start_address != bottom_address) {
739 739 size_t fill_size = pointer_delta(start_address, bottom_address);
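      // pointer_delta() yields the gap in HeapWords; fill_with_objects()
      // covers it with one or more filler (dummy) objects.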
740 740 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
741 741 increase_used(fill_size * HeapWordSize);
742 742 }
743 743 }
744 744 }
745 745
746 746 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
747 747 size_t desired_word_size,
748 748 size_t* actual_word_size) {
749 749 assert_heap_not_locked_and_not_at_safepoint();
750 750 assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
751 751 "be called for humongous allocation requests");
752 752
753 753 HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
754 754
755 755 if (result == NULL) {
756 756 *actual_word_size = desired_word_size;
757 757 result = attempt_allocation_slow(desired_word_size);
758 758 }
759 759
760 760 assert_heap_not_locked();
761 761 if (result != NULL) {
762 762 assert(*actual_word_size != 0, "Actual size must have been set here");
763 763 dirty_young_block(result, *actual_word_size);
764 764 } else {
765 765 *actual_word_size = 0;
766 766 }
767 767
768 768 return result;
769 769 }
770 770
771 771 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
772 772 assert(!is_init_completed(), "Expect to be called at JVM init time");
773 773 assert(ranges != NULL, "MemRegion array NULL");
774 774 assert(count != 0, "No MemRegions provided");
775 775 MemRegion reserved = _hrm->reserved();
776 776 HeapWord* prev_last_addr = NULL;
777 777 HeapRegion* prev_last_region = NULL;
778 778 size_t size_used = 0;
779 779 size_t uncommitted_regions = 0;
780 780
781 781   // For each MemRegion, free the G1 regions that constitute it, and
782 782 // notify mark-sweep that the range is no longer to be considered 'archive.'
783 783 MutexLocker x(Heap_lock);
784 784 for (size_t i = 0; i < count; i++) {
785 785 HeapWord* start_address = ranges[i].start();
786 786 HeapWord* last_address = ranges[i].last();
787 787
788 788 assert(reserved.contains(start_address) && reserved.contains(last_address),
789 789 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
790 790 p2i(start_address), p2i(last_address));
791 791 assert(start_address > prev_last_addr,
792 792 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
793 793 p2i(start_address), p2i(prev_last_addr));
794 794 size_used += ranges[i].byte_size();
795 795 prev_last_addr = last_address;
796 796
797 797 HeapRegion* start_region = _hrm->addr_to_region(start_address);
798 798 HeapRegion* last_region = _hrm->addr_to_region(last_address);
799 799
800 800 // Check for ranges that start in the same G1 region in which the previous
801 801 // range ended, and adjust the start address so we don't try to free
802 802 // the same region again. If the current range is entirely within that
803 803 // region, skip it.
804 804 if (start_region == prev_last_region) {
805 805 start_address = start_region->end();
806 806 if (start_address > last_address) {
807 807 continue;
808 808 }
809 809 start_region = _hrm->addr_to_region(start_address);
810 810 }
811 811 prev_last_region = last_region;
812 812
813 813 // After verifying that each region was marked as an archive region by
814 814 // alloc_archive_regions, set it free and empty and uncommit it.
815 815 HeapRegion* curr_region = start_region;
816 816 while (curr_region != NULL) {
817 817 guarantee(curr_region->is_archive(),
818 818 "Expected archive region at index %u", curr_region->hrm_index());
819 819 uint curr_index = curr_region->hrm_index();
820 820 _archive_set.remove(curr_region);
821 821 curr_region->set_free();
822 822 curr_region->set_top(curr_region->bottom());
823 823 if (curr_region != last_region) {
824 824 curr_region = _hrm->next_region_in_heap(curr_region);
825 825 } else {
826 826 curr_region = NULL;
827 827 }
828 828 _hrm->shrink_at(curr_index, 1);
829 829 uncommitted_regions++;
830 830 }
831 831
832 832 // Notify mark-sweep that this is no longer an archive range.
833 833 G1ArchiveAllocator::clear_range_archive(ranges[i]);
834 834 }
835 835
836 836 if (uncommitted_regions != 0) {
837 837 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
838 838 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
839 839 }
840 840 decrease_used(size_used);
841 841 }
842 842
843 843 oop G1CollectedHeap::materialize_archived_object(oop obj) {
844 844 assert(obj != NULL, "archived obj is NULL");
845 845 assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");
846 846
847 847 // Loading an archived object makes it strongly reachable. If it is
848 848 // loaded during concurrent marking, it must be enqueued to the SATB
849 849 // queue, shading the previously white object gray.
850 850 G1BarrierSet::enqueue(obj);
851 851
852 852 return obj;
853 853 }
854 854
855 855 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
856 856 ResourceMark rm; // For retrieving the thread names in log messages.
857 857
858 858 // The structure of this method has a lot of similarities to
859 859 // attempt_allocation_slow(). The reason these two were not merged
860 860 // into a single one is that such a method would require several "if
861 861 // allocation is not humongous do this, otherwise do that"
862 862 // conditional paths which would obscure its flow. In fact, an early
863 863 // version of this code did use a unified method which was harder to
864 864 // follow and, as a result, it had subtle bugs that were hard to
865 865 // track down. So keeping these two methods separate allows each to
866 866 // be more readable. It will be good to keep these two in sync as
867 867 // much as possible.
868 868
869 869 assert_heap_not_locked_and_not_at_safepoint();
870 870 assert(is_humongous(word_size), "attempt_allocation_humongous() "
871 871 "should only be called for humongous allocations");
872 872
873 873 // Humongous objects can exhaust the heap quickly, so we should check if we
874 874 // need to start a marking cycle at each humongous object allocation. We do
875 875 // the check before we do the actual allocation. The reason for doing it
876 876 // before the allocation is that we avoid having to keep track of the newly
877 877 // allocated memory while we do a GC.
878 878 if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
879 879 word_size)) {
880 880 collect(GCCause::_g1_humongous_allocation);
881 881 }
882 882
883 883 // We will loop until a) we manage to successfully perform the
884 884 // allocation or b) we successfully schedule a collection which
885 885 // fails to perform the allocation. b) is the only case when we'll
886 886 // return NULL.
887 887 HeapWord* result = NULL;
888 888 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
889 889 bool should_try_gc;
890 890 uint gc_count_before;
891 891
892 892
893 893 {
894 894 MutexLocker x(Heap_lock);
895 895
896 896       // Given that humongous objects are not allocated in young
897 897       // regions, we'll first try to do the allocation without doing a
898 898       // collection, hoping that there's enough space in the heap.
899 899 result = humongous_obj_allocate(word_size);
900 900 if (result != NULL) {
901 901 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
902 902 policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
903 903 return result;
904 904 }
905 905
906 906       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
907 907       // the GCLocker-initiated GC has been performed and then retry. This includes
908 908       // the case when the GCLocker is no longer active but the GC has not yet been performed.
909 909 should_try_gc = !GCLocker::needs_gc();
910 910 // Read the GC count while still holding the Heap_lock.
911 911 gc_count_before = total_collections();
912 912 }
913 913
914 914 if (should_try_gc) {
915 915 bool succeeded;
916 916 result = do_collection_pause(word_size, gc_count_before, &succeeded,
917 917 GCCause::_g1_humongous_allocation);
918 918 if (result != NULL) {
919 919 assert(succeeded, "only way to get back a non-NULL result");
920 920 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
921 921 Thread::current()->name(), p2i(result));
922 922 return result;
923 923 }
924 924
925 925 if (succeeded) {
926 926 // We successfully scheduled a collection which failed to allocate. No
927 927 // point in trying to allocate further. We'll just return NULL.
928 928 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
929 929 SIZE_FORMAT " words", Thread::current()->name(), word_size);
930 930 return NULL;
931 931 }
932 932       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
933 933 Thread::current()->name(), word_size);
934 934 } else {
935 935 // Failed to schedule a collection.
936 936 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
937 937 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
938 938 SIZE_FORMAT " words", Thread::current()->name(), word_size);
939 939 return NULL;
940 940 }
941 941 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
942 942 // The GCLocker is either active or the GCLocker initiated
943 943 // GC has not yet been performed. Stall until it is and
944 944 // then retry the allocation.
945 945 GCLocker::stall_until_clear();
946 946 gclocker_retry_count += 1;
947 947 }
948 948
949 949
950 950 // We can reach here if we were unsuccessful in scheduling a
951 951 // collection (because another thread beat us to it) or if we were
952 952     // stalled due to the GC locker. In either case we should retry the
953 953 // allocation attempt in case another thread successfully
954 954 // performed a collection and reclaimed enough space.
955 955 // Humongous object allocation always needs a lock, so we wait for the retry
956 956     // in the next iteration of the loop, unlike in the regular allocation case.
957 957 // Give a warning if we seem to be looping forever.
958 958
959 959 if ((QueuedAllocationWarningCount > 0) &&
960 960 (try_count % QueuedAllocationWarningCount == 0)) {
961 961 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
962 962 Thread::current()->name(), try_count, word_size);
963 963 }
964 964 }
965 965
966 966 ShouldNotReachHere();
967 967 return NULL;
968 968 }
969 969
970 970 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
971 971 bool expect_null_mutator_alloc_region) {
972 972 assert_at_safepoint_on_vm_thread();
973 973 assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
974 974 "the current alloc region was unexpectedly found to be non-NULL");
975 975
976 976 if (!is_humongous(word_size)) {
977 977 return _allocator->attempt_allocation_locked(word_size);
978 978 } else {
979 979 HeapWord* result = humongous_obj_allocate(word_size);
980 980 if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
981 981 collector_state()->set_initiate_conc_mark_if_possible(true);
982 982 }
983 983 return result;
984 984 }
985 985
986 986 ShouldNotReachHere();
987 987 }
988 988
989 989 class PostCompactionPrinterClosure: public HeapRegionClosure {
990 990 private:
991 991 G1HRPrinter* _hr_printer;
992 992 public:
993 993 bool do_heap_region(HeapRegion* hr) {
994 994 assert(!hr->is_young(), "not expecting to find young regions");
995 995 _hr_printer->post_compaction(hr);
996 996 return false;
997 997 }
998 998
999 999 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1000 1000 : _hr_printer(hr_printer) { }
1001 1001 };
1002 1002
1003 1003 void G1CollectedHeap::print_hrm_post_compaction() {
1004 1004 if (_hr_printer.is_active()) {
1005 1005 PostCompactionPrinterClosure cl(hr_printer());
1006 1006 heap_region_iterate(&cl);
1007 1007 }
1008 1008 }
1009 1009
1010 1010 void G1CollectedHeap::abort_concurrent_cycle() {
1011 1011 // If we start the compaction before the CM threads finish
1012 1012   // scanning the root regions we might trip them up as we'll
1013 1013 // be moving objects / updating references. So let's wait until
1014 1014 // they are done. By telling them to abort, they should complete
1015 1015 // early.
1016 1016 _cm->root_regions()->abort();
1017 1017 _cm->root_regions()->wait_until_scan_finished();
1018 1018
1019 1019 // Disable discovery and empty the discovered lists
1020 1020 // for the CM ref processor.
1021 1021 _ref_processor_cm->disable_discovery();
1022 1022 _ref_processor_cm->abandon_partial_discovery();
1023 1023 _ref_processor_cm->verify_no_references_recorded();
1024 1024
1025 1025 // Abandon current iterations of concurrent marking and concurrent
1026 1026 // refinement, if any are in progress.
1027 - concurrent_mark()->concurrent_cycle_abort();
1027 + concurrent_mark()->concurrent_cycle_abort_by_fullgc();
1028 1028 }
1029 1029
1030 1030 void G1CollectedHeap::prepare_heap_for_full_collection() {
1031 1031 // Make sure we'll choose a new allocation region afterwards.
1032 1032 _allocator->release_mutator_alloc_regions();
1033 1033 _allocator->abandon_gc_alloc_regions();
1034 1034
1035 1035 // We may have added regions to the current incremental collection
1036 1036 // set between the last GC or pause and now. We need to clear the
1037 1037 // incremental collection set and then start rebuilding it afresh
1038 1038 // after this full GC.
1039 1039 abandon_collection_set(collection_set());
1040 1040
1041 1041 tear_down_region_sets(false /* free_list_only */);
1042 1042
1043 1043 hrm()->prepare_for_full_collection_start();
1044 1044 }
1045 1045
1046 1046 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1047 1047 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1048 1048 assert_used_and_recalculate_used_equal(this);
1049 1049 _verifier->verify_region_sets_optional();
1050 1050 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1051 1051 _verifier->check_bitmaps("Full GC Start");
1052 1052 }
1053 1053
1054 1054 void G1CollectedHeap::prepare_heap_for_mutators() {
1055 1055 hrm()->prepare_for_full_collection_end();
1056 1056
1057 1057 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1058 1058 ClassLoaderDataGraph::purge();
1059 1059 MetaspaceUtils::verify_metrics();
1060 1060
1061 1061 // Prepare heap for normal collections.
1062 1062 assert(num_free_regions() == 0, "we should not have added any free regions");
1063 1063 rebuild_region_sets(false /* free_list_only */);
1064 1064 abort_refinement();
1065 1065 resize_heap_if_necessary();
1066 1066
1067 1067 // Rebuild the strong code root lists for each region
1068 1068 rebuild_strong_code_roots();
1069 1069
1070 1070 // Purge code root memory
1071 1071 purge_code_root_memory();
1072 1072
1073 1073 // Start a new incremental collection set for the next pause
1074 1074 start_new_collection_set();
1075 1075
1076 1076 _allocator->init_mutator_alloc_regions();
1077 1077
1078 1078 // Post collection state updates.
1079 1079 MetaspaceGC::compute_new_size();
1080 1080 }
1081 1081
1082 1082 void G1CollectedHeap::abort_refinement() {
1083 1083 if (_hot_card_cache->use_cache()) {
1084 1084 _hot_card_cache->reset_hot_cache();
1085 1085 }
1086 1086
1087 1087 // Discard all remembered set updates.
1088 1088 G1BarrierSet::dirty_card_queue_set().abandon_logs();
1089 1089 assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0,
1090 1090 "DCQS should be empty");
1091 1091 }
1092 1092
1093 1093 void G1CollectedHeap::verify_after_full_collection() {
1094 1094 _hrm->verify_optional();
1095 1095 _verifier->verify_region_sets_optional();
1096 1096 _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1097 1097 // Clear the previous marking bitmap, if needed for bitmap verification.
1098 1098 // Note we cannot do this when we clear the next marking bitmap in
1099 1099 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1100 1100 // objects marked during a full GC against the previous bitmap.
1101 1101 // But we need to clear it before calling check_bitmaps below since
1102 1102 // the full GC has compacted objects and updated TAMS but not updated
1103 1103 // the prev bitmap.
1104 1104 if (G1VerifyBitmaps) {
1105 1105 GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
1106 1106 _cm->clear_prev_bitmap(workers());
1107 1107 }
1108 1108 // This call implicitly verifies that the next bitmap is clear after Full GC.
1109 1109 _verifier->check_bitmaps("Full GC End");
1110 1110
1111 1111 // At this point there should be no regions in the
1112 1112 // entire heap tagged as young.
1113 1113 assert(check_young_list_empty(), "young list should be empty at this point");
1114 1114
1115 1115 // Note: since we've just done a full GC, concurrent
1116 1116 // marking is no longer active. Therefore we need not
1117 1117 // re-enable reference discovery for the CM ref processor.
1118 1118 // That will be done at the start of the next marking cycle.
1119 1119 // We also know that the STW processor should no longer
1120 1120 // discover any new references.
1121 1121 assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
1122 1122 assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
1123 1123 _ref_processor_stw->verify_no_references_recorded();
1124 1124 _ref_processor_cm->verify_no_references_recorded();
1125 1125 }
1126 1126
1127 1127 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
1128 1128 // Post collection logging.
1129 1129 // We should do this after we potentially resize the heap so
1130 1130 // that all the COMMIT / UNCOMMIT events are generated before
1131 1131 // the compaction events.
1132 1132 print_hrm_post_compaction();
1133 1133 heap_transition->print();
1134 1134 print_heap_after_gc();
1135 1135 print_heap_regions();
1136 1136 }
1137 1137
1138 1138 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1139 1139 bool clear_all_soft_refs) {
1140 1140 assert_at_safepoint_on_vm_thread();
1141 1141
1142 1142 if (GCLocker::check_active_before_gc()) {
1143 1143 // Full GC was not completed.
1144 1144 return false;
1145 1145 }
1146 1146
1147 1147 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1148 1148 soft_ref_policy()->should_clear_all_soft_refs();
1149 1149
1150 1150 G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1151 1151 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1152 1152
1153 1153 collector.prepare_collection();
1154 1154 collector.collect();
1155 1155 collector.complete_collection();
1156 1156
1157 1157 // Full collection was successfully completed.
1158 1158 return true;
1159 1159 }
1160 1160
1161 1161 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1162 1162 // Currently, there is no facility in the do_full_collection(bool) API to notify
1163 1163 // the caller that the collection did not succeed (e.g., because it was locked
1164 1164 // out by the GC locker). So, right now, we'll ignore the return value.
1165 1165 bool dummy = do_full_collection(true, /* explicit_gc */
1166 1166 clear_all_soft_refs);
1167 1167 }
1168 1168
1169 1169 void G1CollectedHeap::resize_heap_if_necessary() {
1170 1170 assert_at_safepoint_on_vm_thread();
1171 1171
1172 1172   // Capacity, free and used after the GC are counted over full regions to
1173 1173   // include the waste in the following calculations.
1174 1174 const size_t capacity_after_gc = capacity();
1175 1175 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1176 1176
1177 1177 // This is enforced in arguments.cpp.
1178 1178 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1179 1179 "otherwise the code below doesn't make sense");
1180 1180
1181 1181 // We don't have floating point command-line arguments
1182 1182 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1183 1183 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1184 1184 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1185 1185 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
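  // For example, MinHeapFreeRatio == 40 gives maximum_used_percentage == 0.60,
  // so 600M used after GC implies a minimum desired capacity of 1000M.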
1186 1186
1187 1187 // We have to be careful here as these two calculations can overflow
1188 1188 // 32-bit size_t's.
1189 1189 double used_after_gc_d = (double) used_after_gc;
1190 1190 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1191 1191 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1192 1192
1193 1193 // Let's make sure that they are both under the max heap size, which
1194 1194 // by default will make them fit into a size_t.
1195 1195 double desired_capacity_upper_bound = (double) MaxHeapSize;
1196 1196 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1197 1197 desired_capacity_upper_bound);
1198 1198 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1199 1199 desired_capacity_upper_bound);
1200 1200
1201 1201 // We can now safely turn them into size_t's.
1202 1202 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1203 1203 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1204 1204
1205 1205 // This assert only makes sense here, before we adjust them
1206 1206 // with respect to the min and max heap size.
1207 1207 assert(minimum_desired_capacity <= maximum_desired_capacity,
1208 1208 "minimum_desired_capacity = " SIZE_FORMAT ", "
1209 1209 "maximum_desired_capacity = " SIZE_FORMAT,
1210 1210 minimum_desired_capacity, maximum_desired_capacity);
1211 1211
1212 1212 // Should not be greater than the heap max size. No need to adjust
1213 1213 // it with respect to the heap min size as it's a lower bound (i.e.,
1214 1214 // we'll try to make the capacity larger than it, not smaller).
1215 1215 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1216 1216 // Should not be less than the heap min size. No need to adjust it
1217 1217 // with respect to the heap max size as it's an upper bound (i.e.,
1218 1218 // we'll try to make the capacity smaller than it, not greater).
1219 1219 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1220 1220
1221 1221 if (capacity_after_gc < minimum_desired_capacity) {
1222 1222 // Don't expand unless it's significant
1223 1223 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1224 1224
1225 1225 log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
1226 1226 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1227 1227 "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1228 1228 capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
1229 1229
1230 1230 expand(expand_bytes, _workers);
1231 1231
1232 1232 // No expansion, now see if we want to shrink
1233 1233 } else if (capacity_after_gc > maximum_desired_capacity) {
1234 1234 // Capacity too large, compute shrinking size
1235 1235 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1236 1236
1237 1237 log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
1238 1238 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1239 1239 "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1240 1240 capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1241 1241
1242 1242 shrink(shrink_bytes);
1243 1243 }
1244 1244 }
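
// Illustrative sizing example (flag values assumed, not taken from this
// change): with MinHeapFreeRatio = 40, MaxHeapFreeRatio = 70 and 1GB used
// after the GC,
//   minimum_desired_capacity = 1GB / (1.0 - 0.40) ~= 1.67GB
//   maximum_desired_capacity = 1GB / (1.0 - 0.70) ~= 3.33GB
// so a committed capacity below ~1.67GB triggers an expansion attempt and
// one above ~3.33GB triggers a shrink attempt.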
1245 1245
1246 1246 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1247 1247 bool do_gc,
1248 1248 bool clear_all_soft_refs,
1249 1249 bool expect_null_mutator_alloc_region,
1250 1250 bool* gc_succeeded) {
1251 1251 *gc_succeeded = true;
1252 1252 // Let's attempt the allocation first.
1253 1253 HeapWord* result =
1254 1254 attempt_allocation_at_safepoint(word_size,
1255 1255 expect_null_mutator_alloc_region);
1256 1256 if (result != NULL) {
1257 1257 return result;
1258 1258 }
1259 1259
1260 1260   // In a G1 heap, we're supposed to keep allocation from failing by
1261 1261   // means of incremental pauses. Therefore, at least for now, we'll favor
1262 1262   // expansion over collection. (This might change in the future if we can
1263 1263   // do something smarter than full collection to satisfy a failed alloc.)
1264 1264 result = expand_and_allocate(word_size);
1265 1265 if (result != NULL) {
1266 1266 return result;
1267 1267 }
1268 1268
1269 1269 if (do_gc) {
1270 1270 // Expansion didn't work, we'll try to do a Full GC.
1271 1271 *gc_succeeded = do_full_collection(false, /* explicit_gc */
1272 1272 clear_all_soft_refs);
1273 1273 }
1274 1274
1275 1275 return NULL;
1276 1276 }
1277 1277
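// Overview of the escalation below: each helper call first re-attempts the
// allocation, then tries expand_and_allocate(), and only then (optionally)
// performs a Full GC. The three calls escalate as follows:
//   1. Full GC that keeps soft references,
//   2. Full GC that clears all soft references,
//   3. final allocation attempt with no GC at all.
// If all of them fail, NULL is returned and the caller must handle the OOM.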
1278 1278 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1279 1279 bool* succeeded) {
1280 1280 assert_at_safepoint_on_vm_thread();
1281 1281
1282 1282 // Attempts to allocate followed by Full GC.
1283 1283 HeapWord* result =
1284 1284 satisfy_failed_allocation_helper(word_size,
1285 1285 true, /* do_gc */
1286 1286 false, /* clear_all_soft_refs */
1287 1287 false, /* expect_null_mutator_alloc_region */
1288 1288 succeeded);
1289 1289
1290 1290 if (result != NULL || !*succeeded) {
1291 1291 return result;
1292 1292 }
1293 1293
1294 1294 // Attempts to allocate followed by Full GC that will collect all soft references.
1295 1295 result = satisfy_failed_allocation_helper(word_size,
1296 1296 true, /* do_gc */
1297 1297 true, /* clear_all_soft_refs */
1298 1298 true, /* expect_null_mutator_alloc_region */
1299 1299 succeeded);
1300 1300
1301 1301 if (result != NULL || !*succeeded) {
1302 1302 return result;
1303 1303 }
1304 1304
1305 1305 // Attempts to allocate, no GC
1306 1306 result = satisfy_failed_allocation_helper(word_size,
1307 1307 false, /* do_gc */
1308 1308 false, /* clear_all_soft_refs */
1309 1309 true, /* expect_null_mutator_alloc_region */
1310 1310 succeeded);
1311 1311
1312 1312 if (result != NULL) {
1313 1313 return result;
1314 1314 }
1315 1315
1316 1316 assert(!soft_ref_policy()->should_clear_all_soft_refs(),
1317 1317 "Flag should have been handled and cleared prior to this point");
1318 1318
1319 1319 // What else? We might try synchronous finalization later. If the total
1320 1320 // space available is large enough for the allocation, then a more
1321 1321 // complete compaction phase than we've tried so far might be
1322 1322 // appropriate.
1323 1323 return NULL;
1324 1324 }
1325 1325
1326 1326 // Attempts to expand the heap sufficiently to support an allocation
1327 1327 // of the given "word_size". If successful, performs the allocation
1328 1328 // and returns the address of the allocated block; otherwise returns
1329 1329 // "NULL".
1330 1330
1331 1331 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1332 1332 assert_at_safepoint_on_vm_thread();
1333 1333
1334 1334 _verifier->verify_region_sets_optional();
1335 1335
1336 1336 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1337 1337 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1338 1338 word_size * HeapWordSize);
1339 1339
1340 1340
1341 1341 if (expand(expand_bytes, _workers)) {
1342 1342 _hrm->verify_optional();
1343 1343 _verifier->verify_region_sets_optional();
1344 1344 return attempt_allocation_at_safepoint(word_size,
1345 1345 false /* expect_null_mutator_alloc_region */);
1346 1346 }
1347 1347 return NULL;
1348 1348 }
1349 1349
1350 1350 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1351 1351 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1352 1352 aligned_expand_bytes = align_up(aligned_expand_bytes,
1353 1353 HeapRegion::GrainBytes);
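  // For example (illustrative sizes): a 1-byte request with 4K pages and
  // 1M regions is page-aligned up to 4K and then region-aligned up to 1M,
  // i.e. the committed heap always grows by whole regions.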
1354 1354
1355 1355   log_debug(gc, ergo, heap)("Expand the heap. Requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",
1356 1356 expand_bytes, aligned_expand_bytes);
1357 1357
1358 1358 if (is_maximal_no_gc()) {
1359 1359 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1360 1360 return false;
1361 1361 }
1362 1362
1363 1363 double expand_heap_start_time_sec = os::elapsedTime();
1364 1364 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1365 1365 assert(regions_to_expand > 0, "Must expand by at least one region");
1366 1366
1367 1367 uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1368 1368 if (expand_time_ms != NULL) {
1369 1369 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1370 1370 }
1371 1371
1372 1372 if (expanded_by > 0) {
1373 1373 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1374 1374 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1375 1375 policy()->record_new_heap_size(num_regions());
1376 1376 } else {
1377 1377 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1378 1378
1379 1379 // The expansion of the virtual storage space was unsuccessful.
1380 1380 // Let's see if it was because we ran out of swap.
1381 1381 if (G1ExitOnExpansionFailure &&
1382 1382 _hrm->available() >= regions_to_expand) {
1383 1383 // We had head room...
1384 1384 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1385 1385 }
1386 1386 }
1387 1387 return regions_to_expand > 0;
1388 1388 }
1389 1389
1390 1390 bool G1CollectedHeap::expand_single_region(uint node_index) {
1391 1391 uint expanded_by = _hrm->expand_on_preferred_node(node_index);
1392 1392
1393 1393 if (expanded_by == 0) {
1394 1394 assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
1395 1395 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1396 1396 return false;
1397 1397 }
1398 1398
1399 1399 policy()->record_new_heap_size(num_regions());
1400 1400 return true;
1401 1401 }
1402 1402
1403 1403 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1404 1404 size_t aligned_shrink_bytes =
1405 1405 ReservedSpace::page_align_size_down(shrink_bytes);
1406 1406 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1407 1407 HeapRegion::GrainBytes);
1408 1408 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1409 1409
1410 1410 uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1411 1411 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1412 1412
1413 1413   log_debug(gc, ergo, heap)("Shrink the heap. Requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1414 1414 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1415 1415 if (num_regions_removed > 0) {
1416 1416 policy()->record_new_heap_size(num_regions());
1417 1417 } else {
1418 1418     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1419 1419 }
1420 1420 }
1421 1421
1422 1422 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1423 1423 _verifier->verify_region_sets_optional();
1424 1424
1425 1425   // We should only reach here at the end of a Full GC or during Remark, which
1426 1426   // means we should not be holding on to any GC alloc regions. The method
1427 1427   // below will make sure of that and do any remaining cleanup.
1428 1428 _allocator->abandon_gc_alloc_regions();
1429 1429
1430 1430   // Instead of tearing down / rebuilding the free lists here, we
1431 1431   // could use the remove_all_pending() method on free_list to
1432 1432   // remove only the regions that we need to remove.
1433 1433 tear_down_region_sets(true /* free_list_only */);
1434 1434 shrink_helper(shrink_bytes);
1435 1435 rebuild_region_sets(true /* free_list_only */);
1436 1436
1437 1437 _hrm->verify_optional();
1438 1438 _verifier->verify_region_sets_optional();
1439 1439 }
1440 1440
1441 1441 class OldRegionSetChecker : public HeapRegionSetChecker {
1442 1442 public:
1443 1443 void check_mt_safety() {
1444 1444 // Master Old Set MT safety protocol:
1445 1445 // (a) If we're at a safepoint, operations on the master old set
1446 1446 // should be invoked:
1447 1447 // - by the VM thread (which will serialize them), or
1448 1448 // - by the GC workers while holding the FreeList_lock, if we're
1449 1449 // at a safepoint for an evacuation pause (this lock is taken
1450 1450   //     anyway when a GC alloc region is retired so that a new one
1451 1451 // is allocated from the free list), or
1452 1452 // - by the GC workers while holding the OldSets_lock, if we're at a
1453 1453 // safepoint for a cleanup pause.
1454 1454 // (b) If we're not at a safepoint, operations on the master old set
1455 1455 // should be invoked while holding the Heap_lock.
1456 1456
1457 1457 if (SafepointSynchronize::is_at_safepoint()) {
1458 1458 guarantee(Thread::current()->is_VM_thread() ||
1459 1459 FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
1460 1460 "master old set MT safety protocol at a safepoint");
1461 1461 } else {
1462 1462 guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
1463 1463 }
1464 1464 }
1465 1465 bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
1466 1466 const char* get_description() { return "Old Regions"; }
1467 1467 };
1468 1468
1469 1469 class ArchiveRegionSetChecker : public HeapRegionSetChecker {
1470 1470 public:
1471 1471 void check_mt_safety() {
1472 1472 guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
1473 1473 "May only change archive regions during initialization or safepoint.");
1474 1474 }
1475 1475 bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }
1476 1476 const char* get_description() { return "Archive Regions"; }
1477 1477 };
1478 1478
1479 1479 class HumongousRegionSetChecker : public HeapRegionSetChecker {
1480 1480 public:
1481 1481 void check_mt_safety() {
1482 1482 // Humongous Set MT safety protocol:
1483 1483 // (a) If we're at a safepoint, operations on the master humongous
1484 1484 // set should be invoked by either the VM thread (which will
1485 1485 // serialize them) or by the GC workers while holding the
1486 1486 // OldSets_lock.
1487 1487 // (b) If we're not at a safepoint, operations on the master
1488 1488 // humongous set should be invoked while holding the Heap_lock.
1489 1489
1490 1490 if (SafepointSynchronize::is_at_safepoint()) {
1491 1491 guarantee(Thread::current()->is_VM_thread() ||
1492 1492 OldSets_lock->owned_by_self(),
1493 1493 "master humongous set MT safety protocol at a safepoint");
1494 1494 } else {
1495 1495 guarantee(Heap_lock->owned_by_self(),
1496 1496 "master humongous set MT safety protocol outside a safepoint");
1497 1497 }
1498 1498 }
1499 1499 bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1500 1500 const char* get_description() { return "Humongous Regions"; }
1501 1501 };
1502 1502
1503 1503 G1CollectedHeap::G1CollectedHeap() :
1504 1504 CollectedHeap(),
1505 1505 _young_gen_sampling_thread(NULL),
1506 1506 _workers(NULL),
1507 1507 _card_table(NULL),
1508 1508 _soft_ref_policy(),
1509 1509 _old_set("Old Region Set", new OldRegionSetChecker()),
1510 1510 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1511 1511 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1512 1512 _bot(NULL),
1513 1513 _listener(),
1514 1514 _numa(G1NUMA::create()),
1515 1515 _hrm(NULL),
1516 1516 _allocator(NULL),
1517 1517 _verifier(NULL),
1518 1518 _summary_bytes_used(0),
1519 1519 _bytes_used_during_gc(0),
1520 1520 _archive_allocator(NULL),
1521 1521 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1522 1522 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1523 1523 _expand_heap_after_alloc_failure(true),
1524 1524 _g1mm(NULL),
1525 1525 _humongous_reclaim_candidates(),
1526 1526 _has_humongous_reclaim_candidates(false),
1527 1527 _hr_printer(),
1528 1528 _collector_state(),
1529 1529 _old_marking_cycles_started(0),
1530 1530 _old_marking_cycles_completed(0),
1531 1531 _eden(),
1532 1532 _survivor(),
1533 1533 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1534 1534 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1535 1535 _policy(G1Policy::create_policy(_gc_timer_stw)),
1536 1536 _heap_sizing_policy(NULL),
1537 1537 _collection_set(this, _policy),
1538 1538 _hot_card_cache(NULL),
1539 1539 _rem_set(NULL),
1540 1540 _cm(NULL),
1541 1541 _cm_thread(NULL),
1542 1542 _cr(NULL),
1543 1543 _task_queues(NULL),
1544 1544 _evacuation_failed(false),
1545 1545 _evacuation_failed_info_array(NULL),
1546 1546 _preserved_marks_set(true /* in_c_heap */),
1547 1547 #ifndef PRODUCT
1548 1548 _evacuation_failure_alot_for_current_gc(false),
1549 1549 _evacuation_failure_alot_gc_number(0),
1550 1550 _evacuation_failure_alot_count(0),
1551 1551 #endif
1552 1552 _ref_processor_stw(NULL),
1553 1553 _is_alive_closure_stw(this),
1554 1554 _is_subject_to_discovery_stw(this),
1555 1555 _ref_processor_cm(NULL),
1556 1556 _is_alive_closure_cm(this),
1557 1557 _is_subject_to_discovery_cm(this),
1558 1558 _region_attr() {
1559 1559
1560 1560 _verifier = new G1HeapVerifier(this);
1561 1561
1562 1562 _allocator = new G1Allocator(this);
1563 1563
1564 1564 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
1565 1565
1566 1566 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1567 1567
1568 1568 // Override the default _filler_array_max_size so that no humongous filler
1569 1569 // objects are created.
1570 1570 _filler_array_max_size = _humongous_object_threshold_in_words;
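  // E.g. with 2MB regions (an assumed, illustrative size) any object of at
  // least half a region (1MB) is humongous, so capping the filler size at
  // the threshold keeps filler objects non-humongous.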
1571 1571
1572 1572 uint n_queues = ParallelGCThreads;
1573 1573 _task_queues = new RefToScanQueueSet(n_queues);
1574 1574
1575 1575 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1576 1576
1577 1577 for (uint i = 0; i < n_queues; i++) {
1578 1578 RefToScanQueue* q = new RefToScanQueue();
1579 1579 q->initialize();
1580 1580 _task_queues->register_queue(i, q);
1581 1581 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1582 1582 }
1583 1583
1584 1584 // Initialize the G1EvacuationFailureALot counters and flags.
1585 1585 NOT_PRODUCT(reset_evacuation_should_fail();)
1586 1586 _gc_tracer_stw->initialize();
1587 1587
1588 1588 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1589 1589 }
1590 1590
1591 1591 static size_t actual_reserved_page_size(ReservedSpace rs) {
1592 1592 size_t page_size = os::vm_page_size();
1593 1593 if (UseLargePages) {
1594 1594 // There are two ways to manage large page memory.
1595 1595 // 1. OS supports committing large page memory.
1596 1596 // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
1597 1597 // And ReservedSpace calls it 'special'. If we failed to set 'special',
1598 1598   //    we reserved the memory without large pages.
1599 1599 if (os::can_commit_large_page_memory() || rs.special()) {
1600 1600     // The ReservedSpace alignment comes from the preferred page size or
1601 1601     // the heap alignment, and if it came from the heap alignment it could be
1602 1602     // larger than the large page size. So we need to cap it at the large page size.
1603 1603 page_size = MIN2(rs.alignment(), os::large_page_size());
1604 1604 }
1605 1605 }
1606 1606
1607 1607 return page_size;
1608 1608 }
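
// A rough sketch of the resulting behavior (example values assumed):
//   UseLargePages off                             -> os::vm_page_size(), e.g. 4K
//   UseLargePages on, commit supported or special -> MIN2(rs.alignment(), large page size), e.g. 2M
//   UseLargePages on, neither                     -> os::vm_page_size()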
1609 1609
1610 1610 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1611 1611 size_t size,
1612 1612 size_t translation_factor) {
1613 1613 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1614 1614 // Allocate a new reserved space, preferring to use large pages.
1615 1615 ReservedSpace rs(size, preferred_page_size);
1616 1616 size_t page_size = actual_reserved_page_size(rs);
1617 1617 G1RegionToSpaceMapper* result =
1618 1618 G1RegionToSpaceMapper::create_mapper(rs,
1619 1619 size,
1620 1620 page_size,
1621 1621 HeapRegion::GrainBytes,
1622 1622 translation_factor,
1623 1623 mtGC);
1624 1624
1625 1625 os::trace_page_sizes_for_requested_size(description,
1626 1626 size,
1627 1627 preferred_page_size,
1628 1628 page_size,
1629 1629 rs.base(),
1630 1630 rs.size());
1631 1631
1632 1632 return result;
1633 1633 }
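
// The translation_factor expresses how much heap each byte of the auxiliary
// structure covers; e.g. the card table below is assumed to use one byte per
// 512 bytes of heap with the default card size.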
1634 1634
1635 1635 jint G1CollectedHeap::initialize_concurrent_refinement() {
1636 1636 jint ecode = JNI_OK;
1637 1637 _cr = G1ConcurrentRefine::create(&ecode);
1638 1638 return ecode;
1639 1639 }
1640 1640
1641 1641 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1642 1642 _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1643 1643 if (_young_gen_sampling_thread->osthread() == NULL) {
1644 1644 vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1645 1645 return JNI_ENOMEM;
1646 1646 }
1647 1647 return JNI_OK;
1648 1648 }
1649 1649
1650 1650 jint G1CollectedHeap::initialize() {
1651 1651
1652 1652 // Necessary to satisfy locking discipline assertions.
1653 1653
1654 1654 MutexLocker x(Heap_lock);
1655 1655
1656 1656 // While there are no constraints in the GC code that HeapWordSize
1657 1657 // be any particular value, there are multiple other areas in the
1658 1658 // system which believe this to be true (e.g. oop->object_size in some
1659 1659 // cases incorrectly returns the size in wordSize units rather than
1660 1660 // HeapWordSize).
1661 1661 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1662 1662
1663 1663 size_t init_byte_size = InitialHeapSize;
1664 1664 size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();
1665 1665
1666 1666 // Ensure that the sizes are properly aligned.
1667 1667 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1668 1668 Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
1669 1669 Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
1670 1670
1671 1671 // Reserve the maximum.
1672 1672
1673 1673 // When compressed oops are enabled, the preferred heap base
1674 1674 // is calculated by subtracting the requested size from the
1675 1675 // 32Gb boundary and using the result as the base address for
1676 1676 // heap reservation. If the requested size is not aligned to
1677 1677 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1678 1678 // into the ReservedHeapSpace constructor) then the actual
1679 1679 // base of the reserved heap may end up differing from the
1680 1680 // address that was requested (i.e. the preferred heap base).
1681 1681 // If this happens then we could end up using a non-optimal
1682 1682 // compressed oops mode.
1683 1683
1684 1684 ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
1685 1685 HeapAlignment);
1686 1686
1687 1687 initialize_reserved_region(heap_rs);
1688 1688
1689 1689 // Create the barrier set for the entire reserved region.
1690 1690 G1CardTable* ct = new G1CardTable(heap_rs.region());
1691 1691 ct->initialize();
1692 1692 G1BarrierSet* bs = new G1BarrierSet(ct);
1693 1693 bs->initialize();
1694 1694 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1695 1695 BarrierSet::set_barrier_set(bs);
1696 1696 _card_table = ct;
1697 1697
1698 1698 {
1699 1699 G1SATBMarkQueueSet& satbqs = bs->satb_mark_queue_set();
1700 1700 satbqs.set_process_completed_buffers_threshold(G1SATBProcessCompletedThreshold);
1701 1701 satbqs.set_buffer_enqueue_threshold_percentage(G1SATBBufferEnqueueingThresholdPercent);
1702 1702 }
1703 1703
1704 1704 // Create the hot card cache.
1705 1705 _hot_card_cache = new G1HotCardCache(this);
1706 1706
1707 1707 // Carve out the G1 part of the heap.
1708 1708 ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
1709 1709 size_t page_size = actual_reserved_page_size(heap_rs);
1710 1710 G1RegionToSpaceMapper* heap_storage =
1711 1711 G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1712 1712 g1_rs.size(),
1713 1713 page_size,
1714 1714 HeapRegion::GrainBytes,
1715 1715 1,
1716 1716 mtJavaHeap);
1717 1717   if (heap_storage == NULL) {
1718 1718 vm_shutdown_during_initialization("Could not initialize G1 heap");
1719 1719 return JNI_ERR;
1720 1720 }
1721 1721
1722 1722 os::trace_page_sizes("Heap",
1723 1723 MinHeapSize,
1724 1724 reserved_byte_size,
1725 1725 page_size,
1726 1726 heap_rs.base(),
1727 1727 heap_rs.size());
1728 1728 heap_storage->set_mapping_changed_listener(&_listener);
1729 1729
1730 1730 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1731 1731 G1RegionToSpaceMapper* bot_storage =
1732 1732 create_aux_memory_mapper("Block Offset Table",
1733 1733 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1734 1734 G1BlockOffsetTable::heap_map_factor());
1735 1735
1736 1736 G1RegionToSpaceMapper* cardtable_storage =
1737 1737 create_aux_memory_mapper("Card Table",
1738 1738 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1739 1739 G1CardTable::heap_map_factor());
1740 1740
1741 1741 G1RegionToSpaceMapper* card_counts_storage =
1742 1742 create_aux_memory_mapper("Card Counts Table",
1743 1743 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1744 1744 G1CardCounts::heap_map_factor());
1745 1745
1746 1746 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1747 1747 G1RegionToSpaceMapper* prev_bitmap_storage =
1748 1748 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1749 1749 G1RegionToSpaceMapper* next_bitmap_storage =
1750 1750 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1751 1751
1752 1752 _hrm = HeapRegionManager::create_manager(this);
1753 1753
1754 1754 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1755 1755 _card_table->initialize(cardtable_storage);
1756 1756
1757 1757 // Do later initialization work for concurrent refinement.
1758 1758 _hot_card_cache->initialize(card_counts_storage);
1759 1759
1760 1760 // 6843694 - ensure that the maximum region index can fit
1761 1761 // in the remembered set structures.
1762 1762 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1763 1763 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
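  // For instance, if RegionIdx_t is a 16-bit type this evaluates to
  // (1 << 15) - 1 = 32767 addressable regions.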
1764 1764
1765 1765   // The G1FromCardCache reserves the card with value 0 as "invalid", so the heap must not
1766 1766 // start within the first card.
1767 1767 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
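  // With an assumed card size of 512 bytes this only rules out heap base
  // addresses 0..511, which any real reservation already satisfies.
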
1768 1768 // Also create a G1 rem set.
1769 1769 _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1770 1770 _rem_set->initialize(max_reserved_capacity(), max_regions());
1771 1771
1772 1772 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1773 1773 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1774 1774 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1775 1775 "too many cards per region");
1776 1776
1777 1777 FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1778 1778
1779 1779 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1780 1780
1781 1781 {
1782 1782 HeapWord* start = _hrm->reserved().start();
1783 1783 HeapWord* end = _hrm->reserved().end();
1784 1784 size_t granularity = HeapRegion::GrainBytes;
1785 1785
1786 1786 _region_attr.initialize(start, end, granularity);
1787 1787 _humongous_reclaim_candidates.initialize(start, end, granularity);
1788 1788 }
1789 1789
1790 1790 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1791 1791 true /* are_GC_task_threads */,
1792 1792 false /* are_ConcurrentGC_threads */);
1793 1793 if (_workers == NULL) {
1794 1794 return JNI_ENOMEM;
1795 1795 }
1796 1796 _workers->initialize_workers();
1797 1797
1798 1798 _numa->set_region_info(HeapRegion::GrainBytes, page_size);
1799 1799
1800 1800 // Create the G1ConcurrentMark data structure and thread.
1801 1801 // (Must do this late, so that "max_regions" is defined.)
1802 1802 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1803 1803 _cm_thread = _cm->cm_thread();
1804 1804
1805 1805 // Now expand into the initial heap size.
1806 1806 if (!expand(init_byte_size, _workers)) {
1807 1807 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1808 1808 return JNI_ENOMEM;
1809 1809 }
1810 1810
1811 1811 // Perform any initialization actions delegated to the policy.
1812 1812 policy()->init(this, &_collection_set);
1813 1813
1814 1814 jint ecode = initialize_concurrent_refinement();
1815 1815 if (ecode != JNI_OK) {
1816 1816 return ecode;
1817 1817 }
1818 1818
1819 1819 ecode = initialize_young_gen_sampling_thread();
1820 1820 if (ecode != JNI_OK) {
1821 1821 return ecode;
1822 1822 }
1823 1823
1824 1824 {
1825 1825 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1826 1826 dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());
1827 1827 dcqs.set_max_cards(concurrent_refine()->red_zone());
1828 1828 }
1829 1829
1830 1830 // Here we allocate the dummy HeapRegion that is required by the
1831 1831 // G1AllocRegion class.
1832 1832 HeapRegion* dummy_region = _hrm->get_dummy_region();
1833 1833
1834 1834   // We'll re-use the same region whether or not the alloc region
1835 1835   // requires BOT updates, and a non-young region would complain that
1836 1836   // it cannot support allocations without BOT updates. So we'll tag
1837 1837   // the dummy region as eden to avoid that.
1838 1838 dummy_region->set_eden();
1839 1839 // Make sure it's full.
1840 1840 dummy_region->set_top(dummy_region->end());
1841 1841 G1AllocRegion::setup(this, dummy_region);
1842 1842
1843 1843 _allocator->init_mutator_alloc_regions();
1844 1844
1845 1845   // Create the monitoring and management support now that the
1846 1846   // values in the heap have been properly initialized.
1847 1847 _g1mm = new G1MonitoringSupport(this);
1848 1848
1849 1849 G1StringDedup::initialize();
1850 1850
1851 1851 _preserved_marks_set.init(ParallelGCThreads);
1852 1852
1853 1853 _collection_set.initialize(max_regions());
1854 1854
1855 1855 return JNI_OK;
1856 1856 }
1857 1857
1858 1858 void G1CollectedHeap::stop() {
1859 1859 // Stop all concurrent threads. We do this to make sure these threads
1860 1860 // do not continue to execute and access resources (e.g. logging)
1861 1861 // that are destroyed during shutdown.
1862 1862 _cr->stop();
1863 1863 _young_gen_sampling_thread->stop();
1864 1864 _cm_thread->stop();
1865 1865 if (G1StringDedup::is_enabled()) {
1866 1866 G1StringDedup::stop();
1867 1867 }
1868 1868 }
1869 1869
1870 1870 void G1CollectedHeap::safepoint_synchronize_begin() {
1871 1871 SuspendibleThreadSet::synchronize();
1872 1872 }
1873 1873
1874 1874 void G1CollectedHeap::safepoint_synchronize_end() {
1875 1875 SuspendibleThreadSet::desynchronize();
1876 1876 }
1877 1877
1878 1878 void G1CollectedHeap::post_initialize() {
1879 1879 CollectedHeap::post_initialize();
1880 1880 ref_processing_init();
1881 1881 }
1882 1882
1883 1883 void G1CollectedHeap::ref_processing_init() {
1884 1884 // Reference processing in G1 currently works as follows:
1885 1885 //
1886 1886 // * There are two reference processor instances. One is
1887 1887 // used to record and process discovered references
1888 1888 // during concurrent marking; the other is used to
1889 1889 // record and process references during STW pauses
1890 1890 // (both full and incremental).
1891 1891 // * Both ref processors need to 'span' the entire heap as
1892 1892 // the regions in the collection set may be dotted around.
1893 1893 //
1894 1894 // * For the concurrent marking ref processor:
1895 1895 // * Reference discovery is enabled at initial marking.
1896 1896 // * Reference discovery is disabled and the discovered
1897 1897   //     references are processed etc. during remarking.
1898 1898 // * Reference discovery is MT (see below).
1899 1899 // * Reference discovery requires a barrier (see below).
1900 1900 // * Reference processing may or may not be MT
1901 1901 // (depending on the value of ParallelRefProcEnabled
1902 1902 // and ParallelGCThreads).
1903 1903 // * A full GC disables reference discovery by the CM
1904 1904   //     ref processor and abandons any entries on its
1905 1905 // discovered lists.
1906 1906 //
1907 1907 // * For the STW processor:
1908 1908 // * Non MT discovery is enabled at the start of a full GC.
1909 1909 // * Processing and enqueueing during a full GC is non-MT.
1910 1910 // * During a full GC, references are processed after marking.
1911 1911 //
1912 1912 // * Discovery (may or may not be MT) is enabled at the start
1913 1913 // of an incremental evacuation pause.
1914 1914 // * References are processed near the end of a STW evacuation pause.
1915 1915 // * For both types of GC:
1916 1916 // * Discovery is atomic - i.e. not concurrent.
1917 1917 // * Reference discovery will not need a barrier.
1918 1918
1919 1919 bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1920 1920
1921 1921 // Concurrent Mark ref processor
1922 1922 _ref_processor_cm =
1923 1923 new ReferenceProcessor(&_is_subject_to_discovery_cm,
1924 1924 mt_processing, // mt processing
1925 1925 ParallelGCThreads, // degree of mt processing
1926 1926 (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
1927 1927 MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
1928 1928 false, // Reference discovery is not atomic
1929 1929 &_is_alive_closure_cm, // is alive closure
1930 1930 true); // allow changes to number of processing threads
1931 1931
1932 1932 // STW ref processor
1933 1933 _ref_processor_stw =
1934 1934 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1935 1935 mt_processing, // mt processing
1936 1936 ParallelGCThreads, // degree of mt processing
1937 1937 (ParallelGCThreads > 1), // mt discovery
1938 1938 ParallelGCThreads, // degree of mt discovery
1939 1939 true, // Reference discovery is atomic
1940 1940 &_is_alive_closure_stw, // is alive closure
1941 1941 true); // allow changes to number of processing threads
1942 1942 }
1943 1943
1944 1944 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1945 1945 return &_soft_ref_policy;
1946 1946 }
1947 1947
1948 1948 size_t G1CollectedHeap::capacity() const {
1949 1949 return _hrm->length() * HeapRegion::GrainBytes;
1950 1950 }
1951 1951
1952 1952 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1953 1953 return _hrm->total_free_bytes();
1954 1954 }
1955 1955
1956 1956 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id) {
1957 1957 _hot_card_cache->drain(cl, worker_id);
1958 1958 }
1959 1959
1960 1960 // Computes the sum of the storage used by the various regions.
1961 1961 size_t G1CollectedHeap::used() const {
1962 1962 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1963 1963 if (_archive_allocator != NULL) {
1964 1964 result += _archive_allocator->used();
1965 1965 }
1966 1966 return result;
1967 1967 }
1968 1968
1969 1969 size_t G1CollectedHeap::used_unlocked() const {
1970 1970 return _summary_bytes_used;
1971 1971 }
1972 1972
1973 1973 class SumUsedClosure: public HeapRegionClosure {
1974 1974 size_t _used;
1975 1975 public:
1976 1976 SumUsedClosure() : _used(0) {}
1977 1977 bool do_heap_region(HeapRegion* r) {
1978 1978 _used += r->used();
1979 1979 return false;
1980 1980 }
1981 1981 size_t result() { return _used; }
1982 1982 };
1983 1983
1984 1984 size_t G1CollectedHeap::recalculate_used() const {
1985 1985 SumUsedClosure blk;
1986 1986 heap_region_iterate(&blk);
1987 1987 return blk.result();
1988 1988 }
1989 1989
1990 1990 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1991 1991 switch (cause) {
1992 1992 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
1993 1993 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
1994 1994 case GCCause::_wb_conc_mark: return true;
1995 1995 default : return false;
1996 1996 }
1997 1997 }
1998 1998
1999 1999 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2000 2000 switch (cause) {
2001 2001 case GCCause::_g1_humongous_allocation: return true;
2002 2002 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
2003 2003 case GCCause::_wb_breakpoint: return true;
2004 2004 default: return is_user_requested_concurrent_full_gc(cause);
2005 2005 }
2006 2006 }
2007 2007
2008 2008 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
2009 2009 if (policy()->force_upgrade_to_full()) {
2010 2010 return true;
2011 2011 } else if (should_do_concurrent_full_gc(_gc_cause)) {
2012 2012 return false;
2013 2013 } else if (has_regions_left_for_allocation()) {
2014 2014 return false;
2015 2015 } else {
2016 2016 return true;
2017 2017 }
2018 2018 }
2019 2019
2020 2020 #ifndef PRODUCT
2021 2021 void G1CollectedHeap::allocate_dummy_regions() {
2022 2022 // Let's fill up most of the region
2023 2023 size_t word_size = HeapRegion::GrainWords - 1024;
2024 2024 // And as a result the region we'll allocate will be humongous.
2025 2025 guarantee(is_humongous(word_size), "sanity");
2026 2026
2027 2027   // _filler_array_max_size is set to the humongous object threshold,
2028 2028   // but we temporarily change it so CollectedHeap::fill_with_object() can be used.
2029 2029 SizeTFlagSetting fs(_filler_array_max_size, word_size);
2030 2030
2031 2031 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2032 2032 // Let's use the existing mechanism for the allocation
2033 2033 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2034 2034 if (dummy_obj != NULL) {
2035 2035 MemRegion mr(dummy_obj, word_size);
2036 2036 CollectedHeap::fill_with_object(mr);
2037 2037 } else {
2038 2038 // If we can't allocate once, we probably cannot allocate
2039 2039 // again. Let's get out of the loop.
2040 2040 break;
2041 2041 }
2042 2042 }
2043 2043 }
2044 2044 #endif // !PRODUCT
2045 2045
2046 2046 void G1CollectedHeap::increment_old_marking_cycles_started() {
2047 2047 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2048 2048 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2049 2049          "Wrong marking cycle count (started: %u, completed: %u)",
2050 2050 _old_marking_cycles_started, _old_marking_cycles_completed);
2051 2051
2052 2052 _old_marking_cycles_started++;
2053 2053 }
2054 2054
2055 2055 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2056 2056 MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);
2057 2057
2058 2058 // We assume that if concurrent == true, then the caller is a
2059 2059   // concurrent thread that has joined the Suspendible Thread
2060 2060 // Set. If there's ever a cheap way to check this, we should add an
2061 2061 // assert here.
2062 2062
2063 2063 // Given that this method is called at the end of a Full GC or of a
2064 2064 // concurrent cycle, and those can be nested (i.e., a Full GC can
2065 2065 // interrupt a concurrent cycle), the number of full collections
2066 2066 // completed should be either one (in the case where there was no
2067 2067 // nesting) or two (when a Full GC interrupted a concurrent cycle)
2068 2068 // behind the number of full collections started.
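  //
  // Illustrative walk-through (values assumed for the example): a concurrent
  // cycle starts (started = N+1, completed = N); a Full GC interrupts it
  // (started = N+2); the Full GC finishes first (inner caller, completed
  // becomes N+1) and the aborted concurrent cycle finishes afterwards
  // (outer caller, completed becomes N+2).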
2069 2069
2070 2070 // This is the case for the inner caller, i.e. a Full GC.
2071 2071 assert(concurrent ||
2072 2072 (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2073 2073 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2074 2074 "for inner caller (Full GC): _old_marking_cycles_started = %u "
2075 2075 "is inconsistent with _old_marking_cycles_completed = %u",
2076 2076 _old_marking_cycles_started, _old_marking_cycles_completed);
2077 2077
2078 2078 // This is the case for the outer caller, i.e. the concurrent cycle.
2079 2079 assert(!concurrent ||
2080 2080 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2081 2081 "for outer caller (concurrent cycle): "
2082 2082 "_old_marking_cycles_started = %u "
2083 2083 "is inconsistent with _old_marking_cycles_completed = %u",
2084 2084 _old_marking_cycles_started, _old_marking_cycles_completed);
2085 2085
2086 2086 _old_marking_cycles_completed += 1;
2087 2087
2088 2088 // We need to clear the "in_progress" flag in the CM thread before
2089 2089   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2090 2090 // is set) so that if a waiter requests another System.gc() it doesn't
2091 2091 // incorrectly see that a marking cycle is still in progress.
2092 2092 if (concurrent) {
2093 2093 _cm_thread->set_idle();
2094 2094 }
2095 2095
2096 2096 // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
2097 2097 // for a full GC to finish that their wait is over.
2098 2098 ml.notify_all();
2099 2099 }
2100 2100
2101 2101 void G1CollectedHeap::collect(GCCause::Cause cause) {
2102 2102 try_collect(cause);
2103 2103 }
2104 2104
2105 2105 // Return true if (x < y) with allowance for wraparound.
2106 2106 static bool gc_counter_less_than(uint x, uint y) {
2107 2107 return (x - y) > (UINT_MAX/2);
2108 2108 }
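
// A few illustrative values: gc_counter_less_than(5u, 7u) is true,
// gc_counter_less_than(7u, 5u) is false, and across a wraparound
// gc_counter_less_than(UINT_MAX - 1, 1u) is also true, since the unsigned
// difference exceeds UINT_MAX/2 exactly when x is "behind" y.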
2109 2109
2110 2110 // LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
2111 2111 // Macro so msg printing is format-checked.
2112 2112 #define LOG_COLLECT_CONCURRENTLY(cause, ...) \
2113 2113 do { \
2114 2114 LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt; \
2115 2115 if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) { \
2116 2116 ResourceMark rm; /* For thread name. */ \
2117 2117 LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
2118 2118 LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
2119 2119 Thread::current()->name(), \
2120 2120 GCCause::to_string(cause)); \
2121 2121 LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__); \
2122 2122 } \
2123 2123 } while (0)
2124 2124
2125 2125 #define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
2126 2126 LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
2127 2127
2128 2128 bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
2129 2129 uint gc_counter,
2130 2130 uint old_marking_started_before) {
2131 2131 assert_heap_not_locked();
2132 2132 assert(should_do_concurrent_full_gc(cause),
2133 2133 "Non-concurrent cause %s", GCCause::to_string(cause));
2134 2134
2135 2135 for (uint i = 1; true; ++i) {
2136 2136 // Try to schedule an initial-mark evacuation pause that will
2137 2137 // start a concurrent cycle.
2138 2138 LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
2139 2139 VM_G1TryInitiateConcMark op(gc_counter,
2140 2140 cause,
2141 2141 policy()->max_pause_time_ms());
2142 2142 VMThread::execute(&op);
2143 2143
2144 2144 // Request is trivially finished.
2145 2145 if (cause == GCCause::_g1_periodic_collection) {
2146 2146 LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded());
2147 2147 return op.gc_succeeded();
2148 2148 }
2149 2149
2150 2150 // If VMOp skipped initiating concurrent marking cycle because
2151 2151 // we're terminating, then we're done.
2152 2152 if (op.terminating()) {
2153 2153 LOG_COLLECT_CONCURRENTLY(cause, "skipped: terminating");
2154 2154 return false;
2155 2155 }
2156 2156
2157 2157 // Lock to get consistent set of values.
2158 2158 uint old_marking_started_after;
2159 2159 uint old_marking_completed_after;
2160 2160 {
2161 2161 MutexLocker ml(Heap_lock);
2162 2162 // Update gc_counter for retrying VMOp if needed. Captured here to be
2163 2163 // consistent with the values we use below for termination tests. If
2164 2164 // a retry is needed after a possible wait, and another collection
2165 2165 // occurs in the meantime, it will cause our retry to be skipped and
2166 2166 // we'll recheck for termination with updated conditions from that
2167 2167 // more recent collection. That's what we want, rather than having
2168 2168 // our retry possibly perform an unnecessary collection.
2169 2169 gc_counter = total_collections();
2170 2170 old_marking_started_after = _old_marking_cycles_started;
2171 2171 old_marking_completed_after = _old_marking_cycles_completed;
2172 2172 }
2173 2173
2174 2174 if (cause == GCCause::_wb_breakpoint) {
2175 2175 if (op.gc_succeeded()) {
2176 2176 LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
2177 2177 return true;
2178 2178 }
2179 2179       // When the cause is _wb_breakpoint there can't be another cycle or a deferred request.
2180 2180 assert(!op.cycle_already_in_progress(), "invariant");
2181 2181 assert(!op.whitebox_attached(), "invariant");
2182 2182 // Concurrent cycle attempt might have been cancelled by some other
2183 2183 // collection, so retry. Unlike other cases below, we want to retry
2184 2184 // even if cancelled by a STW full collection, because we really want
2185 2185 // to start a concurrent cycle.
2186 2186 if (old_marking_started_before != old_marking_started_after) {
2187 2187 LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
2188 2188 old_marking_started_before = old_marking_started_after;
2189 2189 }
2190 2190 } else if (!GCCause::is_user_requested_gc(cause)) {
2191 2191 // For an "automatic" (not user-requested) collection, we just need to
2192 2192 // ensure that progress is made.
2193 2193 //
2194 2194 // Request is finished if any of
2195 2195 // (1) the VMOp successfully performed a GC,
2196 2196 // (2) a concurrent cycle was already in progress,
2197 2197 // (3) whitebox is controlling concurrent cycles,
2198 2198 // (4) a new cycle was started (by this thread or some other), or
2199 2199 // (5) a Full GC was performed.
2200 2200 // Cases (4) and (5) are detected together by a change to
2201 2201 // _old_marking_cycles_started.
2202 2202 //
2203 2203 // Note that (1) does not imply (4). If we're still in the mixed
2204 2204 // phase of an earlier concurrent collection, the request to make the
2205 2205 // collection an initial-mark won't be honored. If we don't check for
2206 2206 // both conditions we'll spin doing back-to-back collections.
2207 2207 if (op.gc_succeeded() ||
2208 2208 op.cycle_already_in_progress() ||
2209 2209 op.whitebox_attached() ||
2210 2210 (old_marking_started_before != old_marking_started_after)) {
2211 2211 LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
2212 2212 return true;
2213 2213 }
2214 2214 } else { // User-requested GC.
2215 2215 // For a user-requested collection, we want to ensure that a complete
2216 2216 // full collection has been performed before returning, but without
2217 2217 // waiting for more than needed.
2218 2218
2219 2219 // For user-requested GCs (unlike non-UR), a successful VMOp implies a
2220 2220 // new cycle was started. That's good, because it's not clear what we
2221 2221       // should do otherwise. Trying again just does back-to-back GCs.
2222 2222 // Can't wait for someone else to start a cycle. And returning fails
2223 2223 // to meet the goal of ensuring a full collection was performed.
2224 2224 assert(!op.gc_succeeded() ||
2225 2225 (old_marking_started_before != old_marking_started_after),
2226 2226 "invariant: succeeded %s, started before %u, started after %u",
2227 2227 BOOL_TO_STR(op.gc_succeeded()),
2228 2228 old_marking_started_before, old_marking_started_after);
2229 2229
2230 2230 // Request is finished if a full collection (concurrent or stw)
2231 2231 // was started after this request and has completed, e.g.
2232 2232 // started_before < completed_after.
2233 2233 if (gc_counter_less_than(old_marking_started_before,
2234 2234 old_marking_completed_after)) {
2235 2235 LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
2236 2236 return true;
2237 2237 }
2238 2238
2239 2239 if (old_marking_started_after != old_marking_completed_after) {
2240 2240 // If there is an in-progress cycle (possibly started by us), then
2241 2241 // wait for that cycle to complete, e.g.
2242 2242 // while completed_now < started_after.
2243 2243 LOG_COLLECT_CONCURRENTLY(cause, "wait");
2244 2244 MonitorLocker ml(G1OldGCCount_lock);
2245 2245 while (gc_counter_less_than(_old_marking_cycles_completed,
2246 2246 old_marking_started_after)) {
2247 2247 ml.wait();
2248 2248 }
2249 2249 // Request is finished if the collection we just waited for was
2250 2250 // started after this request.
2251 2251 if (old_marking_started_before != old_marking_started_after) {
2252 2252 LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
2253 2253 return true;
2254 2254 }
2255 2255 }
2256 2256
2257 2257 // If VMOp was successful then it started a new cycle that the above
2258 2258     // wait etc. should have recognized as finishing this request. This
2259 2259 // differs from a non-user-request, where gc_succeeded does not imply
2260 2260 // a new cycle was started.
2261 2261 assert(!op.gc_succeeded(), "invariant");
2262 2262
2263 2263 if (op.cycle_already_in_progress()) {
2264 2264 // If VMOp failed because a cycle was already in progress, it
2265 2265 // is now complete. But it didn't finish this user-requested
2266 2266 // GC, so try again.
2267 2267 LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
2268 2268 continue;
2269 2269 } else if (op.whitebox_attached()) {
2270 2270 // If WhiteBox wants control, wait for notification of a state
2271 2271 // change in the controller, then try again. Don't wait for
2272 2272 // release of control, since collections may complete while in
2273 2273 // control. Note: This won't recognize a STW full collection
2274 2274 // while waiting; we can't wait on multiple monitors.
2275 2275 LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
2276 2276 MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
2277 2277 if (ConcurrentGCBreakpoints::is_controlled()) {
2278 2278 ml.wait();
2279 2279 }
2280 2280 continue;
2281 2281 }
2282 2282 }
2283 2283
2284 2284 // Collection failed and should be retried.
2285 2285 assert(op.transient_failure(), "invariant");
2286 2286
2287 2287 if (GCLocker::is_active_and_needs_gc()) {
2288 2288 // If GCLocker is active, wait until clear before retrying.
2289 2289 LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
2290 2290 GCLocker::stall_until_clear();
2291 2291 }
2292 2292
2293 2293 LOG_COLLECT_CONCURRENTLY(cause, "retry");
2294 2294 }
2295 2295 }
2296 2296
2297 2297 bool G1CollectedHeap::try_collect(GCCause::Cause cause) {
2298 2298 assert_heap_not_locked();
2299 2299
2300 2300 // Lock to get consistent set of values.
2301 2301 uint gc_count_before;
2302 2302 uint full_gc_count_before;
2303 2303 uint old_marking_started_before;
2304 2304 {
2305 2305 MutexLocker ml(Heap_lock);
2306 2306 gc_count_before = total_collections();
2307 2307 full_gc_count_before = total_full_collections();
2308 2308 old_marking_started_before = _old_marking_cycles_started;
2309 2309 }
2310 2310
2311 2311 if (should_do_concurrent_full_gc(cause)) {
2312 2312 return try_collect_concurrently(cause,
2313 2313 gc_count_before,
2314 2314 old_marking_started_before);
2315 2315 } else if (GCLocker::should_discard(cause, gc_count_before)) {
2316 2316 // Indicate failure to be consistent with VMOp failure due to
2317 2317 // another collection slipping in after our gc_count but before
2318 2318 // our request is processed.
2319 2319 return false;
2320 2320 } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2321 2321 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2322 2322
2323 2323 // Schedule a standard evacuation pause. We're setting word_size
2324 2324 // to 0 which means that we are not requesting a post-GC allocation.
2325 2325 VM_G1CollectForAllocation op(0, /* word_size */
2326 2326 gc_count_before,
2327 2327 cause,
2328 2328 policy()->max_pause_time_ms());
2329 2329 VMThread::execute(&op);
2330 2330 return op.gc_succeeded();
2331 2331 } else {
2332 2332 // Schedule a Full GC.
2333 2333 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2334 2334 VMThread::execute(&op);
2335 2335 return op.gc_succeeded();
2336 2336 }
2337 2337 }
2338 2338
2339 2339 bool G1CollectedHeap::is_in(const void* p) const {
2340 2340 if (_hrm->reserved().contains(p)) {
2341 2341 // Given that we know that p is in the reserved space,
2342 2342 // heap_region_containing() should successfully
2343 2343 // return the containing region.
2344 2344 HeapRegion* hr = heap_region_containing(p);
2345 2345 return hr->is_in(p);
2346 2346 } else {
2347 2347 return false;
2348 2348 }
2349 2349 }
2350 2350
2351 2351 #ifdef ASSERT
2352 2352 bool G1CollectedHeap::is_in_exact(const void* p) const {
2353 2353 bool contains = reserved_region().contains(p);
2354 2354 bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
2355 2355 if (contains && available) {
2356 2356 return true;
2357 2357 } else {
2358 2358 return false;
2359 2359 }
2360 2360 }
2361 2361 #endif
2362 2362
2363 2363 // Iteration functions.
2364 2364
2365 2365 // Iterates an ObjectClosure over all objects within a HeapRegion.
2366 2366
2367 2367 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2368 2368 ObjectClosure* _cl;
2369 2369 public:
2370 2370 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2371 2371 bool do_heap_region(HeapRegion* r) {
2372 2372 if (!r->is_continues_humongous()) {
2373 2373 r->object_iterate(_cl);
2374 2374 }
2375 2375 return false;
2376 2376 }
2377 2377 };
2378 2378
2379 2379 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2380 2380 IterateObjectClosureRegionClosure blk(cl);
2381 2381 heap_region_iterate(&blk);
2382 2382 }
2383 2383
2384 2384 void G1CollectedHeap::keep_alive(oop obj) {
2385 2385 G1BarrierSet::enqueue(obj);
2386 2386 }
2387 2387
2388 2388 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2389 2389 _hrm->iterate(cl);
2390 2390 }
2391 2391
2392 2392 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
2393 2393 HeapRegionClaimer *hrclaimer,
2394 2394 uint worker_id) const {
2395 2395 _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
2396 2396 }
2397 2397
2398 2398 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
2399 2399 HeapRegionClaimer *hrclaimer) const {
2400 2400 _hrm->par_iterate(cl, hrclaimer, 0);
2401 2401 }
2402 2402
2403 2403 void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
2404 2404 _collection_set.iterate(cl);
2405 2405 }
2406 2406
2407 2407 void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
2408 2408 _collection_set.par_iterate(cl, hr_claimer, worker_id, workers()->active_workers());
2409 2409 }
2410 2410
2411 2411 void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
2412 2412 _collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id, workers()->active_workers());
2413 2413 }
2414 2414
2415 2415 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2416 2416 HeapRegion* hr = heap_region_containing(addr);
2417 2417 return hr->block_start(addr);
2418 2418 }
2419 2419
2420 2420 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2421 2421 HeapRegion* hr = heap_region_containing(addr);
2422 2422 return hr->block_is_obj(addr);
2423 2423 }
2424 2424
2425 2425 bool G1CollectedHeap::supports_tlab_allocation() const {
2426 2426 return true;
2427 2427 }
2428 2428
2429 2429 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2430 2430 return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2431 2431 }
2432 2432
2433 2433 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2434 2434 return _eden.length() * HeapRegion::GrainBytes;
2435 2435 }
2436 2436
2437 2437 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2438 2438 // must not exceed the humongous object limit.
2439 2439 size_t G1CollectedHeap::max_tlab_size() const {
2440 2440 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2441 2441 }
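
// E.g. with 2MB regions (illustrative) the humongous threshold is half a
// region, so TLABs are capped at roughly 1MB and can never be humongous.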
2442 2442
2443 2443 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2444 2444 return _allocator->unsafe_max_tlab_alloc();
2445 2445 }
2446 2446
2447 2447 size_t G1CollectedHeap::max_capacity() const {
2448 2448 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2449 2449 }
2450 2450
2451 2451 size_t G1CollectedHeap::max_reserved_capacity() const {
2452 2452 return _hrm->max_length() * HeapRegion::GrainBytes;
2453 2453 }
2454 2454
2455 2455 jlong G1CollectedHeap::millis_since_last_gc() {
2456 2456 // See the notes in GenCollectedHeap::millis_since_last_gc()
2457 2457 // for more information about the implementation.
2458 2458 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2459 2459 _policy->collection_pause_end_millis();
2460 2460 if (ret_val < 0) {
2461 2461     log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2462 2462                     ". Returning zero instead.", ret_val);
2463 2463 return 0;
2464 2464 }
2465 2465 return ret_val;
2466 2466 }
2467 2467
2468 2468 void G1CollectedHeap::deduplicate_string(oop str) {
2469 2469 assert(java_lang_String::is_instance(str), "invariant");
2470 2470
2471 2471 if (G1StringDedup::is_enabled()) {
2472 2472 G1StringDedup::deduplicate(str);
2473 2473 }
2474 2474 }
2475 2475
2476 2476 void G1CollectedHeap::prepare_for_verify() {
2477 2477 _verifier->prepare_for_verify();
2478 2478 }
2479 2479
2480 2480 void G1CollectedHeap::verify(VerifyOption vo) {
2481 2481 _verifier->verify(vo);
2482 2482 }
2483 2483
2484 2484 bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
2485 2485 return true;
2486 2486 }
2487 2487
2488 2488 bool G1CollectedHeap::is_heterogeneous_heap() const {
2489 2489 return G1Arguments::is_heterogeneous_heap();
2490 2490 }
2491 2491
2492 2492 class PrintRegionClosure: public HeapRegionClosure {
2493 2493 outputStream* _st;
2494 2494 public:
2495 2495 PrintRegionClosure(outputStream* st) : _st(st) {}
2496 2496 bool do_heap_region(HeapRegion* r) {
2497 2497 r->print_on(_st);
2498 2498 return false;
2499 2499 }
2500 2500 };
2501 2501
2502 2502 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2503 2503 const HeapRegion* hr,
2504 2504 const VerifyOption vo) const {
2505 2505 switch (vo) {
2506 2506 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2507 2507 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2508 2508 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
2509 2509 default: ShouldNotReachHere();
2510 2510 }
2511 2511 return false; // keep some compilers happy
2512 2512 }
2513 2513
2514 2514 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2515 2515 const VerifyOption vo) const {
2516 2516 switch (vo) {
2517 2517 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
2518 2518 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2519 2519 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
2520 2520 default: ShouldNotReachHere();
2521 2521 }
2522 2522 return false; // keep some compilers happy
2523 2523 }
2524 2524
2525 2525 void G1CollectedHeap::print_heap_regions() const {
2526 2526 LogTarget(Trace, gc, heap, region) lt;
2527 2527 if (lt.is_enabled()) {
2528 2528 LogStream ls(lt);
2529 2529 print_regions_on(&ls);
2530 2530 }
2531 2531 }
2532 2532
2533 2533 void G1CollectedHeap::print_on(outputStream* st) const {
2534 2534 st->print(" %-20s", "garbage-first heap");
2535 2535 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2536 2536 capacity()/K, used_unlocked()/K);
2537 2537 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
2538 2538 p2i(_hrm->reserved().start()),
2539 2539 p2i(_hrm->reserved().end()));
2540 2540 st->cr();
2541 2541 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2542 2542 uint young_regions = young_regions_count();
2543 2543 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2544 2544 (size_t) young_regions * HeapRegion::GrainBytes / K);
2545 2545 uint survivor_regions = survivor_regions_count();
2546 2546 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2547 2547 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2548 2548 st->cr();
2549 2549 if (_numa->is_enabled()) {
2550 2550 uint num_nodes = _numa->num_active_nodes();
2551 2551 st->print(" remaining free region(s) on each NUMA node: ");
2552 2552 const int* node_ids = _numa->node_ids();
2553 2553 for (uint node_index = 0; node_index < num_nodes; node_index++) {
2554 2554 st->print("%d=%u ", node_ids[node_index], _hrm->num_free_regions(node_index));
2555 2555 }
2556 2556 st->cr();
2557 2557 }
2558 2558 MetaspaceUtils::print_on(st);
2559 2559 }
2560 2560
2561 2561 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2562 2562 st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2563 2563 "HS=humongous(starts), HC=humongous(continues), "
2564 2564 "CS=collection set, F=free, "
2565 2565 "OA=open archive, CA=closed archive, "
2566 2566 "TAMS=top-at-mark-start (previous, next)");
2567 2567 PrintRegionClosure blk(st);
2568 2568 heap_region_iterate(&blk);
2569 2569 }
2570 2570
2571 2571 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2572 2572 print_on(st);
2573 2573
2574 2574 // Print the per-region information.
2575 2575 print_regions_on(st);
2576 2576 }
2577 2577
2578 2578 void G1CollectedHeap::print_on_error(outputStream* st) const {
2579 2579 this->CollectedHeap::print_on_error(st);
2580 2580
2581 2581 if (_cm != NULL) {
2582 2582 st->cr();
2583 2583 _cm->print_on_error(st);
2584 2584 }
2585 2585 }
2586 2586
2587 2587 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2588 2588 workers()->print_worker_threads_on(st);
2589 2589 _cm_thread->print_on(st);
2590 2590 st->cr();
2591 2591 _cm->print_worker_threads_on(st);
2592 2592 _cr->print_threads_on(st);
2593 2593 _young_gen_sampling_thread->print_on(st);
2594 2594 if (G1StringDedup::is_enabled()) {
2595 2595 G1StringDedup::print_worker_threads_on(st);
2596 2596 }
2597 2597 }
2598 2598
2599 2599 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2600 2600 workers()->threads_do(tc);
2601 2601 tc->do_thread(_cm_thread);
2602 2602 _cm->threads_do(tc);
2603 2603 _cr->threads_do(tc);
2604 2604 tc->do_thread(_young_gen_sampling_thread);
2605 2605 if (G1StringDedup::is_enabled()) {
2606 2606 G1StringDedup::threads_do(tc);
2607 2607 }
2608 2608 }
2609 2609
2610 2610 void G1CollectedHeap::print_tracing_info() const {
2611 2611 rem_set()->print_summary_info();
2612 2612 concurrent_mark()->print_summary_info();
2613 2613 }
2614 2614
2615 2615 #ifndef PRODUCT
2616 2616 // Helpful for debugging RSet issues.
2617 2617
2618 2618 class PrintRSetsClosure : public HeapRegionClosure {
2619 2619 private:
2620 2620 const char* _msg;
2621 2621 size_t _occupied_sum;
2622 2622
2623 2623 public:
2624 2624 bool do_heap_region(HeapRegion* r) {
2625 2625 HeapRegionRemSet* hrrs = r->rem_set();
2626 2626 size_t occupied = hrrs->occupied();
2627 2627 _occupied_sum += occupied;
2628 2628
2629 2629 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2630 2630 if (occupied == 0) {
2631 2631 tty->print_cr(" RSet is empty");
2632 2632 } else {
2633 2633 hrrs->print();
2634 2634 }
2635 2635 tty->print_cr("----------");
2636 2636 return false;
2637 2637 }
2638 2638
2639 2639 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
2640 2640 tty->cr();
2641 2641 tty->print_cr("========================================");
2642 2642 tty->print_cr("%s", msg);
2643 2643 tty->cr();
2644 2644 }
2645 2645
2646 2646 ~PrintRSetsClosure() {
2647 2647 tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2648 2648 tty->print_cr("========================================");
2649 2649 tty->cr();
2650 2650 }
2651 2651 };
2652 2652
2653 2653 void G1CollectedHeap::print_cset_rsets() {
2654 2654 PrintRSetsClosure cl("Printing CSet RSets");
2655 2655 collection_set_iterate_all(&cl);
2656 2656 }
2657 2657
2658 2658 void G1CollectedHeap::print_all_rsets() {
2659 2659   PrintRSetsClosure cl("Printing All RSets");
2660 2660 heap_region_iterate(&cl);
2661 2661 }
2662 2662 #endif // PRODUCT
2663 2663
2664 2664 bool G1CollectedHeap::print_location(outputStream* st, void* addr) const {
2665 2665 return BlockLocationPrinter<G1CollectedHeap>::print_location(st, addr);
2666 2666 }
2667 2667
2668 2668 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2669 2669
2670 2670 size_t eden_used_bytes = _eden.used_bytes();
2671 2671 size_t survivor_used_bytes = _survivor.used_bytes();
2672 2672 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2673 2673
2674 2674 size_t eden_capacity_bytes =
2675 2675 (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2676 2676
2677 2677 VirtualSpaceSummary heap_summary = create_heap_space_summary();
2678 2678 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2679 2679 eden_capacity_bytes, survivor_used_bytes, num_regions());
2680 2680 }
2681 2681
2682 2682 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2683 2683 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2684 2684 stats->unused(), stats->used(), stats->region_end_waste(),
2685 2685 stats->regions_filled(), stats->direct_allocated(),
2686 2686 stats->failure_used(), stats->failure_waste());
2687 2687 }
2688 2688
2689 2689 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2690 2690 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2691 2691 gc_tracer->report_gc_heap_summary(when, heap_summary);
2692 2692
2693 2693 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2694 2694 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2695 2695 }
2696 2696
2697 2697 G1CollectedHeap* G1CollectedHeap::heap() {
2698 2698 CollectedHeap* heap = Universe::heap();
2699 2699 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2700 2700 assert(heap->kind() == CollectedHeap::G1, "Invalid name");
2701 2701 return (G1CollectedHeap*)heap;
2702 2702 }
2703 2703
2704 2704 void G1CollectedHeap::gc_prologue(bool full) {
2705 2705 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2706 2706
2707 2707 // This summary needs to be printed before incrementing total collections.
2708 2708 rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2709 2709
2710 2710 // Update common counters.
2711 2711 increment_total_collections(full /* full gc */);
2712 2712 if (full || collector_state()->in_initial_mark_gc()) {
2713 2713 increment_old_marking_cycles_started();
2714 2714 }
2715 2715
2716 2716   // Fill TLABs and such
2717 2717 double start = os::elapsedTime();
2718 2718 ensure_parsability(true);
2719 2719 phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2720 2720 }
2721 2721
2722 2722 void G1CollectedHeap::gc_epilogue(bool full) {
2723 2723 // Update common counters.
2724 2724 if (full) {
2725 2725 // Update the number of full collections that have been completed.
2726 2726 increment_old_marking_cycles_completed(false /* concurrent */);
2727 2727 }
2728 2728
2729 2729 // We are at the end of the GC. Total collections has already been increased.
2730 2730 rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2731 2731
2732 2732 // FIXME: what is this about?
2733 2733 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2734 2734 // is set.
2735 2735 #if COMPILER2_OR_JVMCI
2736 2736 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2737 2737 #endif
2738 2738
2739 2739 double start = os::elapsedTime();
2740 2740 resize_all_tlabs();
2741 2741 phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2742 2742
2743 2743 MemoryService::track_memory_usage();
2744 2744 // We have just completed a GC. Update the soft reference
2745 2745 // policy with the new heap occupancy
2746 2746 Universe::update_heap_info_at_gc();
2747 2747
2748 2748 // Print NUMA statistics.
2749 2749 _numa->print_statistics();
2750 2750 }
2751 2751
2752 2752 void G1CollectedHeap::verify_numa_regions(const char* desc) {
2753 2753 LogTarget(Trace, gc, heap, verify) lt;
2754 2754
2755 2755 if (lt.is_enabled()) {
2756 2756 LogStream ls(lt);
2757 2757 // Iterate all heap regions to print matching between preferred numa id and actual numa id.
2758 2758 G1NodeIndexCheckClosure cl(desc, _numa, &ls);
2759 2759 heap_region_iterate(&cl);
2760 2760 }
2761 2761 }
2762 2762
2763 2763 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2764 2764 uint gc_count_before,
2765 2765 bool* succeeded,
2766 2766 GCCause::Cause gc_cause) {
2767 2767 assert_heap_not_locked_and_not_at_safepoint();
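  // Run the pause as a VM operation; VMThread::execute() blocks until the
  // operation has completed at a safepoint.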
2768 2768 VM_G1CollectForAllocation op(word_size,
2769 2769 gc_count_before,
2770 2770 gc_cause,
2771 2771 policy()->max_pause_time_ms());
2772 2772 VMThread::execute(&op);
2773 2773
2774 2774 HeapWord* result = op.result();
2775 2775 bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();
2776 2776 assert(result == NULL || ret_succeeded,
2777 2777 "the result should be NULL if the VM did not succeed");
2778 2778 *succeeded = ret_succeeded;
2779 2779
2780 2780 assert_heap_not_locked();
2781 2781 return result;
2782 2782 }
2783 2783
2784 2784 void G1CollectedHeap::do_concurrent_mark() {
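  // Signal the concurrent mark thread under CGC_lock; checking in_progress()
  // avoids notifying a cycle that is already running.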
2785 2785 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
2786 2786 if (!_cm_thread->in_progress()) {
2787 2787 _cm_thread->set_started();
2788 2788 CGC_lock->notify();
2789 2789 }
2790 2790 }
2791 2791
2792 2792 size_t G1CollectedHeap::pending_card_num() {
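  // Pending cards live in two places: the per-thread dirty card queues and
  // the buffers already enqueued on the global queue set. Count both.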
2793 2793 struct CountCardsClosure : public ThreadClosure {
2794 2794 size_t _cards;
2795 2795 CountCardsClosure() : _cards(0) {}
2796 2796 virtual void do_thread(Thread* t) {
2797 2797 _cards += G1ThreadLocalData::dirty_card_queue(t).size();
2798 2798 }
2799 2799 } count_from_threads;
2800 2800 Threads::threads_do(&count_from_threads);
2801 2801
2802 2802 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2803 2803 return dcqs.num_cards() + count_from_threads._cards;
2804 2804 }
2805 2805
2806 2806 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2807 2807 // We don't nominate objects with many remembered set entries, on
2808 2808 // the assumption that such objects are likely still live.
2809 2809 HeapRegionRemSet* rem_set = r->rem_set();
2810 2810
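  // With stale-ref eager reclaim enabled, tolerate a small remembered set
  // (entries may be stale); otherwise require the remembered set to be empty.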
2811 2811 return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2812 2812 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2813 2813 G1EagerReclaimHumongousObjects && rem_set->is_empty();
2814 2814 }
2815 2815
2816 2816 #ifndef PRODUCT
2817 2817 void G1CollectedHeap::verify_region_attr_remset_update() {
2818 2818 class VerifyRegionAttrRemSet : public HeapRegionClosure {
2819 2819 public:
2820 2820 virtual bool do_heap_region(HeapRegion* r) {
2821 2821 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2822 2822 bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
2823 2823 assert(r->rem_set()->is_tracked() == needs_remset_update,
2824 2824 "Region %u remset tracking status (%s) different to region attribute (%s)",
2825 2825 r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2826 2826 return false;
2827 2827 }
2828 2828 } cl;
2829 2829 heap_region_iterate(&cl);
2830 2830 }
2831 2831 #endif
2832 2832
2833 2833 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2834 2834 public:
2835 2835 bool do_heap_region(HeapRegion* hr) {
2836 2836 if (!hr->is_archive() && !hr->is_continues_humongous()) {
2837 2837 hr->verify_rem_set();
2838 2838 }
2839 2839 return false;
2840 2840 }
2841 2841 };
2842 2842
2843 2843 uint G1CollectedHeap::num_task_queues() const {
2844 2844 return _task_queues->size();
2845 2845 }
2846 2846
2847 2847 #if TASKQUEUE_STATS
2848 2848 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
2849 2849 st->print_raw_cr("GC Task Stats");
2850 2850 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
2851 2851 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2852 2852 }
2853 2853
2854 2854 void G1CollectedHeap::print_taskqueue_stats() const {
2855 2855 if (!log_is_enabled(Trace, gc, task, stats)) {
2856 2856 return;
2857 2857 }
2858 2858 Log(gc, task, stats) log;
2859 2859 ResourceMark rm;
2860 2860 LogStream ls(log.trace());
2861 2861 outputStream* st = &ls;
2862 2862
2863 2863 print_taskqueue_stats_hdr(st);
2864 2864
2865 2865 TaskQueueStats totals;
2866 2866 const uint n = num_task_queues();
2867 2867 for (uint i = 0; i < n; ++i) {
2868 2868 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2869 2869 totals += task_queue(i)->stats;
2870 2870 }
2871 2871 st->print_raw("tot "); totals.print(st); st->cr();
2872 2872
2873 2873 DEBUG_ONLY(totals.verify());
2874 2874 }
2875 2875
2876 2876 void G1CollectedHeap::reset_taskqueue_stats() {
2877 2877 const uint n = num_task_queues();
2878 2878 for (uint i = 0; i < n; ++i) {
2879 2879 task_queue(i)->stats.reset();
2880 2880 }
2881 2881 }
2882 2882 #endif // TASKQUEUE_STATS
2883 2883
2884 2884 void G1CollectedHeap::wait_for_root_region_scanning() {
2885 2885 double scan_wait_start = os::elapsedTime();
2886 2886 // We have to wait until the CM threads finish scanning the
2887 2887 // root regions as it's the only way to ensure that all the
2888 2888 // objects on them have been correctly scanned before we start
2889 2889 // moving them during the GC.
2890 2890 bool waited = _cm->root_regions()->wait_until_scan_finished();
2891 2891 double wait_time_ms = 0.0;
2892 2892 if (waited) {
2893 2893 double scan_wait_end = os::elapsedTime();
2894 2894 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2895 2895 }
2896 2896 phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2897 2897 }
2898 2898
2899 2899 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2900 2900 private:
2901 2901 G1HRPrinter* _hr_printer;
2902 2902 public:
2903 2903 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2904 2904
2905 2905 virtual bool do_heap_region(HeapRegion* r) {
2906 2906 _hr_printer->cset(r);
2907 2907 return false;
2908 2908 }
2909 2909 };
2910 2910
2911 2911 void G1CollectedHeap::start_new_collection_set() {
2912 2912 double start = os::elapsedTime();
2913 2913
2914 2914 collection_set()->start_incremental_building();
2915 2915
2916 2916 clear_region_attr();
2917 2917
2918 2918 guarantee(_eden.length() == 0, "eden should have been cleared");
2919 2919 policy()->transfer_survivors_to_cset(survivor());
2920 2920
2921 2921   // We redo the verification, but now with respect to the new CSet which
2922 2922 // has just got initialized after the previous CSet was freed.
2923 2923 _cm->verify_no_collection_set_oops();
2924 2924
2925 2925 phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
2926 2926 }
2927 2927
2928 2928 void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
2929 2929
2930 2930 _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
2931 2931 evacuation_info.set_collectionset_regions(collection_set()->region_length() +
2932 2932 collection_set()->optional_region_length());
2933 2933
2934 2934 _cm->verify_no_collection_set_oops();
2935 2935
2936 2936 if (_hr_printer.is_active()) {
2937 2937 G1PrintCollectionSetClosure cl(&_hr_printer);
2938 2938 _collection_set.iterate(&cl);
2939 2939 _collection_set.iterate_optional(&cl);
2940 2940 }
2941 2941 }
2942 2942
2943 2943 G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
2944 2944 if (collector_state()->in_initial_mark_gc()) {
2945 2945 return G1HeapVerifier::G1VerifyConcurrentStart;
2946 2946 } else if (collector_state()->in_young_only_phase()) {
2947 2947 return G1HeapVerifier::G1VerifyYoungNormal;
2948 2948 } else {
2949 2949 return G1HeapVerifier::G1VerifyMixed;
2950 2950 }
2951 2951 }
2952 2952
2953 2953 void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyType type) {
2954 2954 if (VerifyRememberedSets) {
2955 2955 log_info(gc, verify)("[Verifying RemSets before GC]");
2956 2956 VerifyRegionRemSetClosure v_cl;
2957 2957 heap_region_iterate(&v_cl);
2958 2958 }
2959 2959 _verifier->verify_before_gc(type);
2960 2960 _verifier->check_bitmaps("GC Start");
2961 2961 verify_numa_regions("GC Start");
2962 2962 }
2963 2963
2964 2964 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2965 2965 if (VerifyRememberedSets) {
2966 2966 log_info(gc, verify)("[Verifying RemSets after GC]");
2967 2967 VerifyRegionRemSetClosure v_cl;
2968 2968 heap_region_iterate(&v_cl);
2969 2969 }
2970 2970 _verifier->verify_after_gc(type);
2971 2971 _verifier->check_bitmaps("GC End");
2972 2972 verify_numa_regions("GC End");
2973 2973 }
2974 2974
2975 2975 void G1CollectedHeap::expand_heap_after_young_collection() {
2976 2976 size_t expand_bytes = _heap_sizing_policy->expansion_amount();
2977 2977 if (expand_bytes > 0) {
2978 2978     // No need for ergo logging here;
2979 2979 // expansion_amount() does this when it returns a value > 0.
2980 2980 double expand_ms;
2981 2981 if (!expand(expand_bytes, _workers, &expand_ms)) {
2982 2982 // We failed to expand the heap. Cannot do anything about it.
2983 2983 }
2984 2984 phase_times()->record_expand_heap_time(expand_ms);
2985 2985 }
2986 2986 }
2987 2987
2988 2988 const char* G1CollectedHeap::young_gc_name() const {
2989 2989 if (collector_state()->in_initial_mark_gc()) {
2990 2990 return "Pause Young (Concurrent Start)";
2991 2991 } else if (collector_state()->in_young_only_phase()) {
2992 2992 if (collector_state()->in_young_gc_before_mixed()) {
2993 2993 return "Pause Young (Prepare Mixed)";
2994 2994 } else {
2995 2995 return "Pause Young (Normal)";
2996 2996 }
2997 2997 } else {
2998 2998 return "Pause Young (Mixed)";
2999 2999 }
3000 3000 }
3001 3001
3002 3002 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3003 3003 assert_at_safepoint_on_vm_thread();
3004 3004 guarantee(!is_gc_active(), "collection is not reentrant");
3005 3005
3006 3006 if (GCLocker::check_active_before_gc()) {
3007 3007 return false;
3008 3008 }
3009 3009
3010 3010 do_collection_pause_at_safepoint_helper(target_pause_time_ms);
3011 3011 if (should_upgrade_to_full_gc(gc_cause())) {
3012 3012 log_info(gc, ergo)("Attempting maximally compacting collection");
3013 3013 bool result = do_full_collection(false /* explicit gc */,
3014 3014 true /* clear_all_soft_refs */);
3015 3015 // do_full_collection only fails if blocked by GC locker, but
3016 3016 // we've already checked for that above.
3017 3017 assert(result, "invariant");
3018 3018 }
3019 3019 return true;
3020 3020 }
3021 3021
3022 3022 void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
3023 3023 GCIdMark gc_id_mark;
3024 3024
3025 3025 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3026 3026 ResourceMark rm;
3027 3027
3028 3028 policy()->note_gc_start();
3029 3029
3030 3030 _gc_timer_stw->register_gc_start();
3031 3031 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3032 3032
3033 3033 wait_for_root_region_scanning();
3034 3034
3035 3035 print_heap_before_gc();
3036 3036 print_heap_regions();
3037 3037 trace_heap_before_gc(_gc_tracer_stw);
3038 3038
3039 3039 _verifier->verify_region_sets_optional();
3040 3040 _verifier->verify_dirty_young_regions();
3041 3041
3042 3042 // We should not be doing initial mark unless the conc mark thread is running
3043 3043 if (!_cm_thread->should_terminate()) {
3044 3044 // This call will decide whether this pause is an initial-mark
3045 3045 // pause. If it is, in_initial_mark_gc() will return true
3046 3046 // for the duration of this pause.
3047 3047 policy()->decide_on_conc_mark_initiation();
3048 3048 }
3049 3049
3050 3050 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3051 3051 assert(!collector_state()->in_initial_mark_gc() ||
3052 3052 collector_state()->in_young_only_phase(), "sanity");
3053 3053 // We also do not allow mixed GCs during marking.
3054 3054 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
3055 3055
3056 3056 // Record whether this pause is an initial mark. When the current
3057 3057 // thread has completed its logging output and it's safe to signal
3058 3058 // the CM thread, the flag's value in the policy has been reset.
3059 3059 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
3060 3060 if (should_start_conc_mark) {
3061 3061 _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3062 3062 }
3063 3063
3064 3064 // Inner scope for scope based logging, timers, and stats collection
3065 3065 {
3066 3066 G1EvacuationInfo evacuation_info;
3067 3067
3068 3068 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3069 3069
3070 3070 GCTraceCPUTime tcpu;
3071 3071
3072 3072 GCTraceTime(Info, gc) tm(young_gc_name(), NULL, gc_cause(), true);
3073 3073
3074 3074 uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
3075 3075 workers()->active_workers(),
3076 3076 Threads::number_of_non_daemon_threads());
3077 3077 active_workers = workers()->update_active_workers(active_workers);
3078 3078 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
3079 3079
3080 3080 G1MonitoringScope ms(g1mm(),
3081 3081 false /* full_gc */,
3082 3082 collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
3083 3083
3084 3084 G1HeapTransition heap_transition(this);
3085 3085
3086 3086 {
3087 3087 IsGCActiveMark x;
3088 3088
3089 3089 gc_prologue(false);
3090 3090
3091 3091 G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
3092 3092 verify_before_young_collection(verify_type);
3093 3093
3094 3094 {
3095 3095         // The pause time measured from the start time below deliberately
3096 3096         // excludes the possible verification above.
3097 3097 double sample_start_time_sec = os::elapsedTime();
3098 3098
3099 3099 // Please see comment in g1CollectedHeap.hpp and
3100 3100 // G1CollectedHeap::ref_processing_init() to see how
3101 3101 // reference processing currently works in G1.
3102 3102 _ref_processor_stw->enable_discovery();
3103 3103
3104 3104 // We want to temporarily turn off discovery by the
3105 3105 // CM ref processor, if necessary, and turn it back on
3106 3106       // again later if we do. Using a scoped
3107 3107 // NoRefDiscovery object will do this.
3108 3108 NoRefDiscovery no_cm_discovery(_ref_processor_cm);
3109 3109
3110 3110 policy()->record_collection_pause_start(sample_start_time_sec);
3111 3111
3112 3112 // Forget the current allocation region (we might even choose it to be part
3113 3113 // of the collection set!).
3114 3114 _allocator->release_mutator_alloc_regions();
3115 3115
3116 3116 calculate_collection_set(evacuation_info, target_pause_time_ms);
3117 3117
3118 3118 G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
3119 3119 G1ParScanThreadStateSet per_thread_states(this,
3120 3120 &rdcqs,
3121 3121 workers()->active_workers(),
3122 3122 collection_set()->young_region_length(),
3123 3123 collection_set()->optional_region_length());
3124 3124 pre_evacuate_collection_set(evacuation_info, &per_thread_states);
3125 3125
3126 3126 // Actually do the work...
3127 3127 evacuate_initial_collection_set(&per_thread_states);
3128 3128
3128 3128
3129 3129 if (_collection_set.optional_region_length() != 0) {
3130 3130 evacuate_optional_collection_set(&per_thread_states);
3131 3131 }
3132 3132 post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);
3133 3133
3134 3134 start_new_collection_set();
3135 3135
3136 3136 _survivor_evac_stats.adjust_desired_plab_sz();
3137 3137 _old_evac_stats.adjust_desired_plab_sz();
3138 3138
3139 - if (should_start_conc_mark) {
3139 + if (gc_cause() == GCCause::_g1_humongous_allocation && collector_state()->in_initial_mark_gc()) {
3140 +           // Check whether we still need a concurrent mark cycle after evacuation.
3141 +           // Abort the cycle if eager reclaim of humongous objects removed the need to mark.
3142 + if (!policy()->need_to_start_conc_mark("end of GC")) {
3143 + concurrent_mark()->concurrent_cycle_abort_by_initial_mark();
3144 + }
3145 + }
3146 +
3147 + if (should_start_conc_mark && !concurrent_mark()->aborted_by_initial_mark()) {
3140 3148 // We have to do this before we notify the CM threads that
3141 3149 // they can start working to make sure that all the
3142 3150 // appropriate initialization is done on the CM object.
3143 3151 concurrent_mark()->post_initial_mark();
3144 3152 // Note that we don't actually trigger the CM thread at
3145 3153 // this point. We do that later when we're sure that
3146 3154 // the current thread has completed its logging output.
3147 3155 }
3148 3156
3149 3157 allocate_dummy_regions();
3150 3158
3151 3159 _allocator->init_mutator_alloc_regions();
3152 3160
3153 3161 expand_heap_after_young_collection();
3154 3162
3155 3163 double sample_end_time_sec = os::elapsedTime();
3156 3164 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3157 3165 policy()->record_collection_pause_end(pause_time_ms);
3158 3166 }
3159 3167
3160 3168 verify_after_young_collection(verify_type);
3161 3169
3162 3170 gc_epilogue(false);
3163 3171 }
3164 3172
3165 3173 // Print the remainder of the GC log output.
3166 3174 if (evacuation_failed()) {
3167 3175 log_info(gc)("To-space exhausted");
3168 3176 }
3169 3177
3170 3178 policy()->print_phases();
3171 3179 heap_transition.print();
3172 3180
3173 3181 _hrm->verify_optional();
3174 3182 _verifier->verify_region_sets_optional();
3175 3183
3176 3184 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3177 3185 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3178 3186
3179 3187 print_heap_after_gc();
3180 3188 print_heap_regions();
3181 3189 trace_heap_after_gc(_gc_tracer_stw);
3182 3190
3183 3191 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3184 3192 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3185 3193 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3186 3194 // before any GC notifications are raised.
3187 3195 g1mm()->update_sizes();
3188 3196
3189 3197 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3190 3198 _gc_tracer_stw->report_tenuring_threshold(_policy->tenuring_threshold());
3191 3199 _gc_timer_stw->register_gc_end();
3192 3200 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3193 3201 }
3194 3202 // It should now be safe to tell the concurrent mark thread to start
3195 3203 // without its logging output interfering with the logging output
3196 3204 // that came from the pause.
3197 3205
3198 3206 if (should_start_conc_mark) {
3199 3207 // CAUTION: after the doConcurrentMark() call below, the concurrent marking
3200 3208 // thread(s) could be running concurrently with us. Make sure that anything
3201 3209 // after this point does not assume that we are the only GC thread running.
3202 3210 // Note: of course, the actual marking work will not start until the safepoint
3203 3211 // itself is released in SuspendibleThreadSet::desynchronize().
3204 3212 do_concurrent_mark();
3205 3213 ConcurrentGCBreakpoints::notify_idle_to_active();
3206 3214 }
3207 3215 }
3208 3216
3209 3217 void G1CollectedHeap::remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs) {
3210 3218 G1ParRemoveSelfForwardPtrsTask rsfp_task(rdcqs);
3211 3219 workers()->run_task(&rsfp_task);
3212 3220 }
3213 3221
3214 3222 void G1CollectedHeap::restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs) {
3215 3223 double remove_self_forwards_start = os::elapsedTime();
3216 3224
3217 3225 remove_self_forwarding_pointers(rdcqs);
3218 3226 _preserved_marks_set.restore(workers());
3219 3227
3220 3228 phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3221 3229 }
3222 3230
3223 3231 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m) {
3224 3232 if (!_evacuation_failed) {
3225 3233 _evacuation_failed = true;
3226 3234 }
3227 3235
3228 3236 _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3229 3237 _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3230 3238 }
3231 3239
3232 3240 bool G1ParEvacuateFollowersClosure::offer_termination() {
3233 3241 EventGCPhaseParallel event;
3234 3242 G1ParScanThreadState* const pss = par_scan_state();
3235 3243 start_term_time();
3236 3244 const bool res = terminator()->offer_termination();
3237 3245 end_term_time();
3238 3246 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3239 3247 return res;
3240 3248 }
3241 3249
3242 3250 void G1ParEvacuateFollowersClosure::do_void() {
3243 3251 EventGCPhaseParallel event;
3244 3252 G1ParScanThreadState* const pss = par_scan_state();
3245 3253 pss->trim_queue();
3246 3254 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
3247 3255 do {
3248 3256 EventGCPhaseParallel event;
3249 3257 pss->steal_and_trim_queue(queues());
3250 3258 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
3251 3259 } while (!offer_termination());
3252 3260 }
3253 3261
3254 3262 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3255 3263 bool class_unloading_occurred) {
3256 3264 uint num_workers = workers()->active_workers();
3257 3265 G1ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false);
3258 3266 workers()->run_task(&unlink_task);
3259 3267 }
3260 3268
3261 3269 // Clean string dedup data structures.
3262 3270 // Ideally we would prefer to use a StringDedupCleaningTask here, but we want to
3263 3271 // record the durations of the phases. Hence the almost-copy.
3264 3272 class G1StringDedupCleaningTask : public AbstractGangTask {
3265 3273 BoolObjectClosure* _is_alive;
3266 3274 OopClosure* _keep_alive;
3267 3275 G1GCPhaseTimes* _phase_times;
3268 3276
3269 3277 public:
3270 3278 G1StringDedupCleaningTask(BoolObjectClosure* is_alive,
3271 3279 OopClosure* keep_alive,
3272 3280 G1GCPhaseTimes* phase_times) :
3273 3281 AbstractGangTask("Partial Cleaning Task"),
3274 3282 _is_alive(is_alive),
3275 3283 _keep_alive(keep_alive),
3276 3284 _phase_times(phase_times)
3277 3285 {
3278 3286 assert(G1StringDedup::is_enabled(), "String deduplication disabled.");
3279 3287 StringDedup::gc_prologue(true);
3280 3288 }
3281 3289
3282 3290 ~G1StringDedupCleaningTask() {
3283 3291 StringDedup::gc_epilogue();
3284 3292 }
3285 3293
3286 3294 void work(uint worker_id) {
3287 3295 StringDedupUnlinkOrOopsDoClosure cl(_is_alive, _keep_alive);
3288 3296 {
3289 3297 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
3290 3298 StringDedupQueue::unlink_or_oops_do(&cl);
3291 3299 }
3292 3300 {
3293 3301 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
3294 3302 StringDedupTable::unlink_or_oops_do(&cl, worker_id);
3295 3303 }
3296 3304 }
3297 3305 };
3298 3306
3299 3307 void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
3300 3308 OopClosure* keep_alive,
3301 3309 G1GCPhaseTimes* phase_times) {
3302 3310 G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
3303 3311 workers()->run_task(&cl);
3304 3312 }
3305 3313
3306 3314 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3307 3315 private:
3308 3316 G1RedirtyCardsQueueSet* _qset;
3309 3317 G1CollectedHeap* _g1h;
3310 3318 BufferNode* volatile _nodes;
3311 3319
3312 3320 void par_apply(RedirtyLoggedCardTableEntryClosure* cl, uint worker_id) {
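    // Claim buffers lock-free: a successful cmpxchg on the list head hands
    // this worker the node; on failure another worker claimed it first, so
    // retry with the freshly loaded head.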
3313 3321 size_t buffer_size = _qset->buffer_size();
3314 3322 BufferNode* next = Atomic::load(&_nodes);
3315 3323 while (next != NULL) {
3316 3324 BufferNode* node = next;
3317 3325 next = Atomic::cmpxchg(&_nodes, node, node->next());
3318 3326 if (next == node) {
3319 3327 cl->apply_to_buffer(node, buffer_size, worker_id);
3320 3328 next = node->next();
3321 3329 }
3322 3330 }
3323 3331 }
3324 3332
3325 3333 public:
3326 3334 G1RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* qset, G1CollectedHeap* g1h) :
3327 3335 AbstractGangTask("Redirty Cards"),
3328 3336 _qset(qset), _g1h(g1h), _nodes(qset->all_completed_buffers()) { }
3329 3337
3330 3338 virtual void work(uint worker_id) {
3331 3339 G1GCPhaseTimes* p = _g1h->phase_times();
3332 3340 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id);
3333 3341
3334 3342 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3335 3343 par_apply(&cl, worker_id);
3336 3344
3337 3345 p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3338 3346 }
3339 3347 };
3340 3348
3341 3349 void G1CollectedHeap::redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs) {
3342 3350 double redirty_logged_cards_start = os::elapsedTime();
3343 3351
3344 3352 G1RedirtyLoggedCardsTask redirty_task(rdcqs, this);
3345 3353 workers()->run_task(&redirty_task);
3346 3354
3347 3355 G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3348 3356 dcq.merge_bufferlists(rdcqs);
3349 3357
3350 3358 phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3351 3359 }
3352 3360
3353 3361 // Weak Reference Processing support
3354 3362
3355 3363 bool G1STWIsAliveClosure::do_object_b(oop p) {
3356 3364 // An object is reachable if it is outside the collection set,
3357 3365 // or is inside and copied.
3358 3366 return !_g1h->is_in_cset(p) || p->is_forwarded();
3359 3367 }
3360 3368
3361 3369 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3362 3370 assert(obj != NULL, "must not be NULL");
3363 3371 assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3364 3372 // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3365 3373 // may falsely indicate that this is not the case here: however the collection set only
3366 3374 // contains old regions when concurrent mark is not running.
3367 3375 return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3368 3376 }
3369 3377
3370 3378 // Non Copying Keep Alive closure
3371 3379 class G1KeepAliveClosure: public OopClosure {
3372 3380 G1CollectedHeap*_g1h;
3373 3381 public:
3374 3382 G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
3375 3383 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3376 3384 void do_oop(oop* p) {
3377 3385 oop obj = *p;
3378 3386 assert(obj != NULL, "the caller should have filtered out NULL values");
3379 3387
3380 3388 const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
3381 3389 if (!region_attr.is_in_cset_or_humongous()) {
3382 3390 return;
3383 3391 }
3384 3392 if (region_attr.is_in_cset()) {
3385 3393 assert( obj->is_forwarded(), "invariant" );
3386 3394 *p = obj->forwardee();
3387 3395 } else {
3388 3396 assert(!obj->is_forwarded(), "invariant" );
3389 3397 assert(region_attr.is_humongous(),
3390 3398 "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
3391 3399 _g1h->set_humongous_is_live(obj);
3392 3400 }
3393 3401 }
3394 3402 };
3395 3403
3396 3404 // Copying Keep Alive closure - can be called from both
3397 3405 // serial and parallel code as long as different worker
3398 3406 // threads utilize different G1ParScanThreadState instances
3399 3407 // and different queues.
3400 3408
3401 3409 class G1CopyingKeepAliveClosure: public OopClosure {
3402 3410 G1CollectedHeap* _g1h;
3403 3411 G1ParScanThreadState* _par_scan_state;
3404 3412
3405 3413 public:
3406 3414 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3407 3415 G1ParScanThreadState* pss):
3408 3416 _g1h(g1h),
3409 3417 _par_scan_state(pss)
3410 3418 {}
3411 3419
3412 3420 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3413 3421 virtual void do_oop( oop* p) { do_oop_work(p); }
3414 3422
3415 3423 template <class T> void do_oop_work(T* p) {
3416 3424 oop obj = RawAccess<>::oop_load(p);
3417 3425
3418 3426 if (_g1h->is_in_cset_or_humongous(obj)) {
3419 3427 // If the referent object has been forwarded (either copied
3420 3428 // to a new location or to itself in the event of an
3421 3429 // evacuation failure) then we need to update the reference
3422 3430 // field and, if both reference and referent are in the G1
3423 3431 // heap, update the RSet for the referent.
3424 3432 //
3425 3433 // If the referent has not been forwarded then we have to keep
3426 3434     // it alive by policy. Therefore we have to copy the referent.
3427 3435 //
3428 3436 // When the queue is drained (after each phase of reference processing)
3429 3437     // the object and its followers will be copied, the reference field set
3430 3438 // to point to the new location, and the RSet updated.
3431 3439 _par_scan_state->push_on_queue(p);
3432 3440 }
3433 3441 }
3434 3442 };
3435 3443
3436 3444 // Serial drain queue closure. Called as the 'complete_gc'
3437 3445 // closure for each discovered list in some of the
3438 3446 // reference processing phases.
3439 3447
3440 3448 class G1STWDrainQueueClosure: public VoidClosure {
3441 3449 protected:
3442 3450 G1CollectedHeap* _g1h;
3443 3451 G1ParScanThreadState* _par_scan_state;
3444 3452
3445 3453 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
3446 3454
3447 3455 public:
3448 3456 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
3449 3457 _g1h(g1h),
3450 3458 _par_scan_state(pss)
3451 3459 { }
3452 3460
3453 3461 void do_void() {
3454 3462 G1ParScanThreadState* const pss = par_scan_state();
3455 3463 pss->trim_queue();
3456 3464 }
3457 3465 };
3458 3466
3459 3467 // Parallel Reference Processing closures
3460 3468
3461 3469 // Implementation of AbstractRefProcTaskExecutor for parallel reference
3462 3470 // processing during G1 evacuation pauses.
3463 3471
3464 3472 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
3465 3473 private:
3466 3474 G1CollectedHeap* _g1h;
3467 3475 G1ParScanThreadStateSet* _pss;
3468 3476 RefToScanQueueSet* _queues;
3469 3477 WorkGang* _workers;
3470 3478
3471 3479 public:
3472 3480 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
3473 3481 G1ParScanThreadStateSet* per_thread_states,
3474 3482 WorkGang* workers,
3475 3483 RefToScanQueueSet *task_queues) :
3476 3484 _g1h(g1h),
3477 3485 _pss(per_thread_states),
3478 3486 _queues(task_queues),
3479 3487 _workers(workers)
3480 3488 {
3481 3489 g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers());
3482 3490 }
3483 3491
3484 3492   // Executes the given task using GC worker threads.
3485 3493 virtual void execute(ProcessTask& task, uint ergo_workers);
3486 3494 };
3487 3495
3488 3496 // Gang task for possibly parallel reference processing
3489 3497
3490 3498 class G1STWRefProcTaskProxy: public AbstractGangTask {
3491 3499 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
3492 3500 ProcessTask& _proc_task;
3493 3501 G1CollectedHeap* _g1h;
3494 3502 G1ParScanThreadStateSet* _pss;
3495 3503 RefToScanQueueSet* _task_queues;
3496 3504 TaskTerminator* _terminator;
3497 3505
3498 3506 public:
3499 3507 G1STWRefProcTaskProxy(ProcessTask& proc_task,
3500 3508 G1CollectedHeap* g1h,
3501 3509 G1ParScanThreadStateSet* per_thread_states,
3502 3510 RefToScanQueueSet *task_queues,
3503 3511 TaskTerminator* terminator) :
3504 3512 AbstractGangTask("Process reference objects in parallel"),
3505 3513 _proc_task(proc_task),
3506 3514 _g1h(g1h),
3507 3515 _pss(per_thread_states),
3508 3516 _task_queues(task_queues),
3509 3517 _terminator(terminator)
3510 3518 {}
3511 3519
3512 3520 virtual void work(uint worker_id) {
3513 3521 // The reference processing task executed by a single worker.
3514 3522 ResourceMark rm;
3515 3523 HandleMark hm;
3516 3524
3517 3525 G1STWIsAliveClosure is_alive(_g1h);
3518 3526
3519 3527 G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3520 3528 pss->set_ref_discoverer(NULL);
3521 3529
3522 3530 // Keep alive closure.
3523 3531 G1CopyingKeepAliveClosure keep_alive(_g1h, pss);
3524 3532
3525 3533 // Complete GC closure
3526 3534 G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator, G1GCPhaseTimes::ObjCopy);
3527 3535
3528 3536 // Call the reference processing task's work routine.
3529 3537 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
3530 3538
3531 3539 // Note we cannot assert that the refs array is empty here as not all
3532 3540 // of the processing tasks (specifically phase2 - pp2_work) execute
3533 3541 // the complete_gc closure (which ordinarily would drain the queue) so
3534 3542 // the queue may not be empty.
3535 3543 }
3536 3544 };
3537 3545
3538 3546 // Driver routine for parallel reference processing.
3539 3547 // Creates an instance of the ref processing gang
3540 3548 // task and has the worker threads execute it.
3541 3549 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
3542 3550 assert(_workers != NULL, "Need parallel worker threads.");
3543 3551
3544 3552 assert(_workers->active_workers() >= ergo_workers,
3545 3553 "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
3546 3554 ergo_workers, _workers->active_workers());
3547 3555 TaskTerminator terminator(ergo_workers, _queues);
3548 3556 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
3549 3557
3550 3558 _workers->run_task(&proc_task_proxy, ergo_workers);
3551 3559 }
3552 3560
3553 3561 // End of weak reference support closures
3554 3562
3555 3563 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3556 3564 double ref_proc_start = os::elapsedTime();
3557 3565
3558 3566 ReferenceProcessor* rp = _ref_processor_stw;
3559 3567 assert(rp->discovery_enabled(), "should have been enabled");
3560 3568
3561 3569 // Closure to test whether a referent is alive.
3562 3570 G1STWIsAliveClosure is_alive(this);
3563 3571
3564 3572 // Even when parallel reference processing is enabled, the processing
3566 3574   // of JNI refs is serial, performed by the current thread
3566 3574 // rather than by a worker. The following PSS will be used for processing
3567 3575 // JNI refs.
3568 3576
3569 3577 // Use only a single queue for this PSS.
3570 3578 G1ParScanThreadState* pss = per_thread_states->state_for_worker(0);
3571 3579 pss->set_ref_discoverer(NULL);
3572 3580 assert(pss->queue_is_empty(), "pre-condition");
3573 3581
3574 3582 // Keep alive closure.
3575 3583 G1CopyingKeepAliveClosure keep_alive(this, pss);
3576 3584
3577 3585 // Serial Complete GC closure
3578 3586 G1STWDrainQueueClosure drain_queue(this, pss);
3579 3587
3580 3588 // Setup the soft refs policy...
3581 3589 rp->setup_policy(false);
3582 3590
3583 3591 ReferenceProcessorPhaseTimes* pt = phase_times()->ref_phase_times();
3584 3592
3585 3593 ReferenceProcessorStats stats;
3586 3594 if (!rp->processing_is_mt()) {
3587 3595 // Serial reference processing...
3588 3596 stats = rp->process_discovered_references(&is_alive,
3589 3597 &keep_alive,
3590 3598 &drain_queue,
3591 3599 NULL,
3592 3600 pt);
3593 3601 } else {
3594 3602 uint no_of_gc_workers = workers()->active_workers();
3595 3603
3596 3604 // Parallel reference processing
3597 3605 assert(no_of_gc_workers <= rp->max_num_queues(),
3598 3606 "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3599 3607 no_of_gc_workers, rp->max_num_queues());
3600 3608
3601 3609 G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues);
3602 3610 stats = rp->process_discovered_references(&is_alive,
3603 3611 &keep_alive,
3604 3612 &drain_queue,
3605 3613 &par_task_executor,
3606 3614 pt);
3607 3615 }
3608 3616
3609 3617 _gc_tracer_stw->report_gc_reference_stats(stats);
3610 3618
3611 3619 // We have completed copying any necessary live referent objects.
3612 3620 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3613 3621
3614 3622 make_pending_list_reachable();
3615 3623
3616 3624 assert(!rp->discovery_enabled(), "Postcondition");
3617 3625 rp->verify_no_references_recorded();
3618 3626
3619 3627 double ref_proc_time = os::elapsedTime() - ref_proc_start;
3620 3628 phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3621 3629 }
3622 3630
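// During an initial-mark pause, references just added to the pending list
// must survive the upcoming concurrent cycle, so mark the list head in the
// next bitmap.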
3623 3631 void G1CollectedHeap::make_pending_list_reachable() {
3624 3632 if (collector_state()->in_initial_mark_gc()) {
3625 3633 oop pll_head = Universe::reference_pending_list();
3626 3634 if (pll_head != NULL) {
3627 3635 // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3628 3636 _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3629 3637 }
3630 3638 }
3631 3639 }
3632 3640
3633 3641 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3634 3642 Ticks start = Ticks::now();
3635 3643 per_thread_states->flush();
3636 3644 phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds());
3637 3645 }
3638 3646
3639 3647 class G1PrepareEvacuationTask : public AbstractGangTask {
3640 3648 class G1PrepareRegionsClosure : public HeapRegionClosure {
3641 3649 G1CollectedHeap* _g1h;
3642 3650 G1PrepareEvacuationTask* _parent_task;
3643 3651 size_t _worker_humongous_total;
3644 3652 size_t _worker_humongous_candidates;
3645 3653
3646 3654 bool humongous_region_is_candidate(HeapRegion* region) const {
3647 3655 assert(region->is_starts_humongous(), "Must start a humongous object");
3648 3656
3649 3657 oop obj = oop(region->bottom());
3650 3658
3651 3659 // Dead objects cannot be eager reclaim candidates. Due to class
3652 3660 // unloading it is unsafe to query their classes so we return early.
3653 3661 if (_g1h->is_obj_dead(obj, region)) {
3654 3662 return false;
3655 3663 }
3656 3664
3657 3665 // If we do not have a complete remembered set for the region, then we can
3658 3666 // not be sure that we have all references to it.
3659 3667 if (!region->rem_set()->is_complete()) {
3660 3668 return false;
3661 3669 }
3662 3670 // Candidate selection must satisfy the following constraints
3663 3671 // while concurrent marking is in progress:
3664 3672 //
3665 3673 // * In order to maintain SATB invariants, an object must not be
3666 3674 // reclaimed if it was allocated before the start of marking and
3667 3675 // has not had its references scanned. Such an object must have
3668 3676 // its references (including type metadata) scanned to ensure no
3669 3677 // live objects are missed by the marking process. Objects
3670 3678 // allocated after the start of concurrent marking don't need to
3671 3679 // be scanned.
3672 3680 //
3673 3681 // * An object must not be reclaimed if it is on the concurrent
3674 3682 // mark stack. Objects allocated after the start of concurrent
3675 3683 // marking are never pushed on the mark stack.
3676 3684 //
3677 3685 // Nominating only objects allocated after the start of concurrent
3678 3686 // marking is sufficient to meet both constraints. This may miss
3679 3687 // some objects that satisfy the constraints, but the marking data
3680 3688 // structures don't support efficiently performing the needed
3681 3689 // additional tests or scrubbing of the mark stack.
3682 3690 //
3683 3691 // However, we presently only nominate is_typeArray() objects.
3684 3692 // A humongous object containing references induces remembered
3685 3693 // set entries on other regions. In order to reclaim such an
3686 3694 // object, those remembered sets would need to be cleaned up.
3687 3695 //
3688 3696 // We also treat is_typeArray() objects specially, allowing them
3689 3697 // to be reclaimed even if allocated before the start of
3690 3698 // concurrent mark. For this we rely on mark stack insertion to
3691 3699 // exclude is_typeArray() objects, preventing reclaiming an object
3692 3700 // that is in the mark stack. We also rely on the metadata for
3693 3701 // such objects to be built-in and so ensured to be kept live.
3694 3702 // Frequent allocation and drop of large binary blobs is an
3695 3703 // important use case for eager reclaim, and this special handling
3696 3704 // may reduce needed headroom.
3697 3705
3698 3706 return obj->is_typeArray() &&
3699 3707 _g1h->is_potential_eager_reclaim_candidate(region);
3700 3708 }
3701 3709
3702 3710 public:
3703 3711 G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
3704 3712 _g1h(g1h),
3705 3713 _parent_task(parent_task),
3706 3714 _worker_humongous_total(0),
3707 3715 _worker_humongous_candidates(0) { }
3708 3716
3709 3717 ~G1PrepareRegionsClosure() {
3710 3718 _parent_task->add_humongous_candidates(_worker_humongous_candidates);
3711 3719 _parent_task->add_humongous_total(_worker_humongous_total);
3712 3720 }
3713 3721
3714 3722 virtual bool do_heap_region(HeapRegion* hr) {
3715 3723 // First prepare the region for scanning
3716 3724 _g1h->rem_set()->prepare_region_for_scan(hr);
3717 3725
3718 3726 // Now check if region is a humongous candidate
3719 3727 if (!hr->is_starts_humongous()) {
3720 3728 _g1h->register_region_with_region_attr(hr);
3721 3729 return false;
3722 3730 }
3723 3731
3724 3732 uint index = hr->hrm_index();
3725 3733 if (humongous_region_is_candidate(hr)) {
3726 3734 _g1h->set_humongous_reclaim_candidate(index, true);
3727 3735 _g1h->register_humongous_region_with_region_attr(index);
3728 3736 _worker_humongous_candidates++;
3729 3737 // We will later handle the remembered sets of these regions.
3730 3738 } else {
3731 3739 _g1h->set_humongous_reclaim_candidate(index, false);
3732 3740 _g1h->register_region_with_region_attr(hr);
3733 3741 }
3734 3742 _worker_humongous_total++;
3735 3743
3736 3744 return false;
3737 3745 }
3738 3746 };
3739 3747
3740 3748 G1CollectedHeap* _g1h;
3741 3749 HeapRegionClaimer _claimer;
3742 3750 volatile size_t _humongous_total;
3743 3751 volatile size_t _humongous_candidates;
3744 3752 public:
3745 3753 G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
3746 3754 AbstractGangTask("Prepare Evacuation"),
3747 3755 _g1h(g1h),
3748 3756 _claimer(_g1h->workers()->active_workers()),
3749 3757 _humongous_total(0),
3750 3758 _humongous_candidates(0) { }
3751 3759
3752 3760 ~G1PrepareEvacuationTask() {
3753 3761 _g1h->set_has_humongous_reclaim_candidate(_humongous_candidates > 0);
3754 3762 }
3755 3763
3756 3764 void work(uint worker_id) {
3757 3765 G1PrepareRegionsClosure cl(_g1h, this);
3758 3766 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
3759 3767 }
3760 3768
3761 3769 void add_humongous_candidates(size_t candidates) {
3762 3770 Atomic::add(&_humongous_candidates, candidates);
3763 3771 }
3764 3772
3765 3773 void add_humongous_total(size_t total) {
3766 3774 Atomic::add(&_humongous_total, total);
3767 3775 }
3768 3776
3769 3777 size_t humongous_candidates() {
3770 3778 return _humongous_candidates;
3771 3779 }
3772 3780
3773 3781 size_t humongous_total() {
3774 3782 return _humongous_total;
3775 3783 }
3776 3784 };
3777 3785
3778 3786 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3779 3787 _bytes_used_during_gc = 0;
3780 3788
3781 3789 _expand_heap_after_alloc_failure = true;
3782 3790 _evacuation_failed = false;
3783 3791
3784 3792 // Disable the hot card cache.
3785 3793 _hot_card_cache->reset_hot_cache_claimed_index();
3786 3794 _hot_card_cache->set_use_cache(false);
3787 3795
3788 3796 // Initialize the GC alloc regions.
3789 3797 _allocator->init_gc_alloc_regions(evacuation_info);
3790 3798
3791 3799 {
3792 3800 Ticks start = Ticks::now();
3793 3801 rem_set()->prepare_for_scan_heap_roots();
3794 3802 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3795 3803 }
3796 3804
3797 3805 {
3798 3806 G1PrepareEvacuationTask g1_prep_task(this);
3799 3807 Tickspan task_time = run_task(&g1_prep_task);
3800 3808
3801 3809 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3802 3810 g1_prep_task.humongous_total(),
3803 3811 g1_prep_task.humongous_candidates());
3804 3812 }
3805 3813
3806 3814 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3807 3815 _preserved_marks_set.assert_empty();
3808 3816
3809 3817 #if COMPILER2_OR_JVMCI
3810 3818 DerivedPointerTable::clear();
3811 3819 #endif
3812 3820
3813 3821 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3814 3822 if (collector_state()->in_initial_mark_gc()) {
3815 3823 concurrent_mark()->pre_initial_mark();
3816 3824
3817 3825 double start_clear_claimed_marks = os::elapsedTime();
3818 3826
3819 3827 ClassLoaderDataGraph::clear_claimed_marks();
3820 3828
3821 3829 double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3822 3830 phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3823 3831 }
3824 3832
3825 3833 // Should G1EvacuationFailureALot be in effect for this GC?
3826 3834 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3827 3835 }
3828 3836
3829 3837 class G1EvacuateRegionsBaseTask : public AbstractGangTask {
3830 3838 protected:
3831 3839 G1CollectedHeap* _g1h;
3832 3840 G1ParScanThreadStateSet* _per_thread_states;
3833 3841 RefToScanQueueSet* _task_queues;
3834 3842 TaskTerminator _terminator;
3835 3843 uint _num_workers;
3836 3844
3837 3845 void evacuate_live_objects(G1ParScanThreadState* pss,
3838 3846 uint worker_id,
3839 3847 G1GCPhaseTimes::GCParPhases objcopy_phase,
3840 3848 G1GCPhaseTimes::GCParPhases termination_phase) {
3841 3849 G1GCPhaseTimes* p = _g1h->phase_times();
3842 3850
3843 3851 Ticks start = Ticks::now();
3844 3852 G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
3845 3853 cl.do_void();
3846 3854
3847 3855 assert(pss->queue_is_empty(), "should be empty");
3848 3856
3849 3857 Tickspan evac_time = (Ticks::now() - start);
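    // Termination wait time is reported against the termination phase below;
    // the object-copy time recorded here excludes it.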
3850 3858 p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
3851 3859
3852 3860 if (termination_phase == G1GCPhaseTimes::Termination) {
3853 3861 p->record_time_secs(termination_phase, worker_id, cl.term_time());
3854 3862 p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3855 3863 } else {
3856 3864 p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
3857 3865 p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3858 3866 }
3859 3867 assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
3860 3868 }
3861 3869
3862 3870 virtual void start_work(uint worker_id) { }
3863 3871
3864 3872 virtual void end_work(uint worker_id) { }
3865 3873
3866 3874 virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
3867 3875
3868 3876 virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
3869 3877
3870 3878 public:
3871 3879 G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
3872 3880 AbstractGangTask(name),
3873 3881 _g1h(G1CollectedHeap::heap()),
3874 3882 _per_thread_states(per_thread_states),
3875 3883 _task_queues(task_queues),
3876 3884 _terminator(num_workers, _task_queues),
3877 3885 _num_workers(num_workers)
3878 3886 { }
3879 3887
3880 3888 void work(uint worker_id) {
3881 3889 start_work(worker_id);
3882 3890
3883 3891 {
3884 3892 ResourceMark rm;
3885 3893 HandleMark hm;
3886 3894
3887 3895 G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
3888 3896 pss->set_ref_discoverer(_g1h->ref_processor_stw());
3889 3897
3890 3898 scan_roots(pss, worker_id);
3891 3899 evacuate_live_objects(pss, worker_id);
3892 3900 }
3893 3901
3894 3902 end_work(worker_id);
3895 3903 }
3896 3904 };
3897 3905
3898 3906 class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
3899 3907 G1RootProcessor* _root_processor;
3900 3908
3901 3909 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3902 3910 _root_processor->evacuate_roots(pss, worker_id);
3903 3911 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy);
3904 3912 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
3905 3913 }
3906 3914
3907 3915 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3908 3916 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
3909 3917 }
3910 3918
3911 3919 void start_work(uint worker_id) {
3912 3920 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
3913 3921 }
3914 3922
3915 3923 void end_work(uint worker_id) {
3916 3924 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
3917 3925 }
3918 3926
3919 3927 public:
3920 3928 G1EvacuateRegionsTask(G1CollectedHeap* g1h,
3921 3929 G1ParScanThreadStateSet* per_thread_states,
3922 3930 RefToScanQueueSet* task_queues,
3923 3931 G1RootProcessor* root_processor,
3924 3932 uint num_workers) :
3925 3933 G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
3926 3934 _root_processor(root_processor)
3927 3935 { }
3928 3936 };
3929 3937
3930 3938 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3931 3939 G1GCPhaseTimes* p = phase_times();
3932 3940
3933 3941 {
3934 3942 Ticks start = Ticks::now();
3935 3943 rem_set()->merge_heap_roots(true /* initial_evacuation */);
3936 3944 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3937 3945 }
3938 3946
3939 3947 Tickspan task_time;
3940 3948 const uint num_workers = workers()->active_workers();
3941 3949
3942 3950 Ticks start_processing = Ticks::now();
3943 3951 {
3944 3952 G1RootProcessor root_processor(this, num_workers);
3945 3953 G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
3946 3954 task_time = run_task(&g1_par_task);
3947 3955 // Closing the inner scope will execute the destructor for the G1RootProcessor object.
3948 3956 // To extract its code root fixup time we measure the total time of this scope
3949 3957 // and subtract the time the WorkGang task took from it.
3950 3958 }
3951 3959 Tickspan total_processing = Ticks::now() - start_processing;
3952 3960
3953 3961 p->record_initial_evac_time(task_time.seconds() * 1000.0);
3954 3962 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3955 3963 }
3956 3964
3957 3965 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
3958 3966
3959 3967 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3960 3968 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
3961 3969 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
3962 3970 }
3963 3971
3964 3972 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3965 3973 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
3966 3974 }
3967 3975
3968 3976 public:
3969 3977 G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
3970 3978 RefToScanQueueSet* queues,
3971 3979 uint num_workers) :
3972 3980 G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3973 3981 }
3974 3982 };
3975 3983
3976 3984 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3977 3985 class G1MarkScope : public MarkScope { };
3978 3986
3979 3987 Tickspan task_time;
3980 3988
3981 3989 Ticks start_processing = Ticks::now();
3982 3990 {
3983 3991 G1MarkScope code_mark_scope;
3984 3992 G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3985 3993 task_time = run_task(&task);
3986 3994 // See the comment in evacuate_initial_collection_set() for the reason for this scope.
3987 3995 }
3988 3996 Tickspan total_processing = Ticks::now() - start_processing;
3989 3997
3990 3998 G1GCPhaseTimes* p = phase_times();
3991 3999 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3992 4000 }
3993 4001
3994 4002 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3995 4003 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3996 4004
3997 4005 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3998 4006
3999 4007 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
4000 4008 double time_left_ms = MaxGCPauseMillis - time_used_ms;
4001 4009
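    // Stop when the pause time budget is exhausted, or when the remaining
    // budget (scaled by the optional evacuation fraction) is too small to
    // finalize another batch of optional regions for evacuation.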
4002 4010 if (time_left_ms < 0 ||
4003 4011 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
4004 4012 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
4005 4013 _collection_set.optional_region_length(), time_left_ms);
4006 4014 break;
4007 4015 }
4008 4016
4009 4017 {
4010 4018 Ticks start = Ticks::now();
4011 4019 rem_set()->merge_heap_roots(false /* initial_evacuation */);
4012 4020 phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
4013 4021 }
4014 4022
4015 4023 {
4016 4024 Ticks start = Ticks::now();
4017 4025 evacuate_next_optional_regions(per_thread_states);
4018 4026 phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
4019 4027 }
4020 4028 }
4021 4029
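  // Remove any optional regions that were not evacuated from the collection set.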
4022 4030 _collection_set.abandon_optional_collection_set(per_thread_states);
4023 4031 }
4024 4032
4025 4033 void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
4026 4034 G1RedirtyCardsQueueSet* rdcqs,
4027 4035 G1ParScanThreadStateSet* per_thread_states) {
4028 4036 G1GCPhaseTimes* p = phase_times();
4029 4037
4030 4038 rem_set()->cleanup_after_scan_heap_roots();
4031 4039
4032 4040 // Process any discovered reference objects - we have
4033 4041 // to do this _before_ we retire the GC alloc regions
4034 4042 // as we may have to copy some 'reachable' referent
4035 4043 // objects (and their reachable sub-graphs) that were
4036 4044 // not copied during the pause.
4037 4045 process_discovered_references(per_thread_states);
4038 4046
4039 4047 G1STWIsAliveClosure is_alive(this);
4040 4048 G1KeepAliveClosure keep_alive(this);
4041 4049
4042 4050 WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());
4043 4051
4044 4052 if (G1StringDedup::is_enabled()) {
4045 4053 double string_dedup_start = os::elapsedTime();
4046 4054
4047 4055 string_dedup_cleaning(&is_alive, &keep_alive, p);
4048 4056
4049 4057 double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_start) * 1000.0;
4050 4058 p->record_string_deduplication_time(string_cleanup_time_ms);
4051 4059 }
4052 4060
4053 4061 _allocator->release_gc_alloc_regions(evacuation_info);
4054 4062
4055 4063 if (evacuation_failed()) {
4056 4064 restore_after_evac_failure(rdcqs);
4057 4065
4058 4066 // Reset the G1EvacuationFailureALot counters and flags
4059 4067 NOT_PRODUCT(reset_evacuation_should_fail();)
4060 4068
4061 4069 double recalculate_used_start = os::elapsedTime();
4062 4070 set_used(recalculate_used());
4063 4071 p->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
4064 4072
4065 4073 if (_archive_allocator != NULL) {
4066 4074 _archive_allocator->clear_used();
4067 4075 }
4068 4076 for (uint i = 0; i < ParallelGCThreads; i++) {
4069 4077 if (_evacuation_failed_info_array[i].has_failed()) {
4070 4078 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4071 4079 }
4072 4080 }
4073 4081 } else {
4074 4082 // The "used" of the the collection set have already been subtracted
4075 4083 // when they were freed. Add in the bytes used.
4076 4084 increase_used(_bytes_used_during_gc);
4077 4085 }
4078 4086
4079 4087 _preserved_marks_set.assert_empty();
4080 4088
4081 4089 merge_per_thread_state_info(per_thread_states);
4082 4090
4083 4091 // Reset and re-enable the hot card cache.
4084 4092 // Note the counts for the cards in the regions in the
4085 4093 // collection set are reset when the collection set is freed.
4086 4094 _hot_card_cache->reset_hot_cache();
4087 4095 _hot_card_cache->set_use_cache(true);
4088 4096
4089 4097 purge_code_root_memory();
4090 4098
4091 4099 redirty_logged_cards(rdcqs);
4092 4100
4093 4101 free_collection_set(&_collection_set, evacuation_info, per_thread_states->surviving_young_words());
4094 4102
4095 4103 eagerly_reclaim_humongous_regions();
4096 4104
4097 4105 record_obj_copy_mem_stats();
4098 4106
4099 4107 evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
4100 4108 evacuation_info.set_bytes_used(_bytes_used_during_gc);
4101 4109
4102 4110 #if COMPILER2_OR_JVMCI
4103 4111 double start = os::elapsedTime();
4104 4112 DerivedPointerTable::update_pointers();
4105 4113 phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4106 4114 #endif
4107 4115 policy()->print_age_table();
4108 4116 }
4109 4117
4110 4118 void G1CollectedHeap::record_obj_copy_mem_stats() {
4111 4119 policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4112 4120
4113 4121 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4114 4122 create_g1_evac_summary(&_old_evac_stats));
4115 4123 }
4116 4124
4117 4125 void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
4118 4126 assert(!hr->is_free(), "the region should not be free");
4119 4127 assert(!hr->is_empty(), "the region should not be empty");
4120 4128 assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
4121 4129
4122 4130 if (G1VerifyBitmaps) {
4123 4131 MemRegion mr(hr->bottom(), hr->end());
4124 4132 concurrent_mark()->clear_range_in_prev_bitmap(mr);
4125 4133 }
4126 4134
4127 4135 // Clear the card counts for this region.
4128 4136 // Note: we only need to do this if the region is not young
4129 4137 // (since we don't refine cards in young regions).
4130 4138 if (!hr->is_young()) {
4131 4139 _hot_card_cache->reset_card_counts(hr);
4132 4140 }
4133 4141
4134 4142 // Reset region metadata to allow reuse.
4135 4143 hr->hr_clear(true /* clear_space */);
4136 4144 _policy->remset_tracker()->update_at_free(hr);
4137 4145
4138 4146 if (free_list != NULL) {
4139 4147 free_list->add_ordered(hr);
4140 4148 }
4141 4149 }
4142 4150
4143 4151 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4144 4152 FreeRegionList* free_list) {
4145 4153 assert(hr->is_humongous(), "this is only for humongous regions");
4146 4154 assert(free_list != NULL, "pre-condition");
4147 4155 hr->clear_humongous();
4148 4156 free_region(hr, free_list);
4149 4157 }
4150 4158
4151 4159 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4152 4160 const uint humongous_regions_removed) {
4153 4161 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4154 4162 MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4155 4163 _old_set.bulk_remove(old_regions_removed);
4156 4164 _humongous_set.bulk_remove(humongous_regions_removed);
4157 4165 }
4159 4167 }
4160 4168
4161 4169 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4162 4170 assert(list != NULL, "list can't be null");
4163 4171 if (!list->is_empty()) {
4164 4172 MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4165 4173 _hrm->insert_list_into_free_list(list);
4166 4174 }
4167 4175 }
4168 4176
4169 4177 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
4170 4178 decrease_used(bytes);
4171 4179 }
4172 4180
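// Frees the regions in the collection set in parallel. Per-worker statistics
// are gathered in FreeCSetStats instances and merged and reported serially
// when the task is destroyed.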
4173 4181 class G1FreeCollectionSetTask : public AbstractGangTask {
4174 4182 // Helper class to keep statistics for the collection set freeing
4175 4183 class FreeCSetStats {
4176 4184 size_t _before_used_bytes; // Usage in regions successfully evacuated
4177 4185 size_t _after_used_bytes; // Usage in regions failing evacuation
4178 4186 size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
4179 4187 size_t _failure_used_words; // Live size in failed regions
4180 4188 size_t _failure_waste_words; // Wasted size in failed regions
4181 4189 size_t _rs_length; // Remembered set size
4182 4190 uint _regions_freed; // Number of regions freed
4183 4191 public:
4184 4192 FreeCSetStats() :
4185 4193 _before_used_bytes(0),
4186 4194 _after_used_bytes(0),
4187 4195 _bytes_allocated_in_old_since_last_gc(0),
4188 4196 _failure_used_words(0),
4189 4197 _failure_waste_words(0),
4190 4198 _rs_length(0),
4191 4199 _regions_freed(0) { }
4192 4200
4193 4201 void merge_stats(FreeCSetStats* other) {
4194 4202 assert(other != NULL, "invariant");
4195 4203 _before_used_bytes += other->_before_used_bytes;
4196 4204 _after_used_bytes += other->_after_used_bytes;
4197 4205 _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
4198 4206 _failure_used_words += other->_failure_used_words;
4199 4207 _failure_waste_words += other->_failure_waste_words;
4200 4208 _rs_length += other->_rs_length;
4201 4209 _regions_freed += other->_regions_freed;
4202 4210 }
4203 4211
4204 4212 void report(G1CollectedHeap* g1h, G1EvacuationInfo* evacuation_info) {
4205 4213 evacuation_info->set_regions_freed(_regions_freed);
4206 4214 evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4207 4215
4208 4216 g1h->decrement_summary_bytes(_before_used_bytes);
4209 4217 g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4210 4218
4211 4219 G1Policy *policy = g1h->policy();
4212 4220 policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4213 4221 policy->record_rs_length(_rs_length);
4214 4222 policy->cset_regions_freed();
4215 4223 }
4216 4224
4217 4225 void account_failed_region(HeapRegion* r) {
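      // Only the marked (live) words of a failed region remain useful data;
      // the rest of the fixed-size region is counted as waste.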
4218 4226 size_t used_words = r->marked_bytes() / HeapWordSize;
4219 4227 _failure_used_words += used_words;
4220 4228 _failure_waste_words += HeapRegion::GrainWords - used_words;
4221 4229 _after_used_bytes += r->used();
4222 4230
4223 4231 // When moving a young gen region to old gen, we "allocate" that whole
4224 4232 // region there. This is in addition to any already evacuated objects.
4225 4233 // Notify the policy about that. Old gen regions do not cause an
4226 4234 // additional allocation: both the objects still in the region and the
4227 4235 // ones already moved are accounted for elsewhere.
4228 4236 if (r->is_young()) {
4229 4237 _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4230 4238 }
4231 4239 }
4232 4240
4233 4241 void account_evacuated_region(HeapRegion* r) {
4234 4242 _before_used_bytes += r->used();
4235 4243 _regions_freed += 1;
4236 4244 }
4237 4245
4238 4246 void account_rs_length(HeapRegion* r) {
4239 4247 _rs_length += r->rem_set()->occupied();
4240 4248 }
4241 4249 };
4242 4250
4243 4251 // Closure applied to all regions in the collection set.
4244 4252 class FreeCSetClosure : public HeapRegionClosure {
4245 4253 // Helper to send JFR events for regions.
4246 4254 class JFREventForRegion {
4247 4255 EventGCPhaseParallel _event;
4248 4256 public:
4249 4257 JFREventForRegion(HeapRegion* region, uint worker_id) : _event() {
4250 4258 _event.set_gcId(GCId::current());
4251 4259 _event.set_gcWorkerId(worker_id);
4252 4260 if (region->is_young()) {
4253 4261 _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
4254 4262 } else {
4255 4263 _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
4256 4264 }
4257 4265 }
4258 4266
4259 4267 ~JFREventForRegion() {
4260 4268 _event.commit();
4261 4269 }
4262 4270 };
4263 4271
4264 4272 // Helper to do timing for region work.
4265 4273 class TimerForRegion {
4266 4274 Tickspan& _time;
4267 4275 Ticks _start_time;
4268 4276 public:
4269 4277 TimerForRegion(Tickspan& time) : _time(time), _start_time(Ticks::now()) { }
4270 4278 ~TimerForRegion() {
4271 4279 _time += Ticks::now() - _start_time;
4272 4280 }
4273 4281 };
4274 4282
4275 4283 // FreeCSetClosure members
4276 4284 G1CollectedHeap* _g1h;
4277 4285 const size_t* _surviving_young_words;
4278 4286 uint _worker_id;
4279 4287 Tickspan _young_time;
4280 4288 Tickspan _non_young_time;
4281 4289 FreeCSetStats* _stats;
4282 4290
4283 4291 void assert_in_cset(HeapRegion* r) {
4284 4292 assert(r->young_index_in_cset() != 0 &&
4285 4293 (uint)r->young_index_in_cset() <= _g1h->collection_set()->young_region_length(),
4286 4294 "Young index %u is wrong for region %u of type %s with %u young regions",
4287 4295 r->young_index_in_cset(), r->hrm_index(), r->get_type_str(), _g1h->collection_set()->young_region_length());
4288 4296 }
4289 4297
4290 4298 void handle_evacuated_region(HeapRegion* r) {
4291 4299 assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4292 4300 stats()->account_evacuated_region(r);
4293 4301
4294 4302 // Free the region and its remembered set.
4295 4303 _g1h->free_region(r, NULL);
4296 4304 }
4297 4305
4298 4306 void handle_failed_region(HeapRegion* r) {
4299 4307 // Do some allocation statistics accounting. Regions that failed evacuation
4300 4308 // are always made old, so there is no need to update anything in the young
4301 4309 // gen statistics, but we need to update old gen statistics.
4302 4310 stats()->account_failed_region(r);
4303 4311
4304 4312 // Update the region state due to the failed evacuation.
4305 4313 r->handle_evacuation_failure();
4306 4314
4307 4315 // Add region to old set, need to hold lock.
4308 4316 MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4309 4317 _g1h->old_set_add(r);
4310 4318 }
4311 4319
4312 4320 Tickspan& timer_for_region(HeapRegion* r) {
4313 4321 return r->is_young() ? _young_time : _non_young_time;
4314 4322 }
4315 4323
4316 4324 FreeCSetStats* stats() {
4317 4325 return _stats;
4318 4326 }
4319 4327 public:
4320 4328 FreeCSetClosure(const size_t* surviving_young_words,
4321 4329 uint worker_id,
4322 4330 FreeCSetStats* stats) :
4323 4331 HeapRegionClosure(),
4324 4332 _g1h(G1CollectedHeap::heap()),
4325 4333 _surviving_young_words(surviving_young_words),
4326 4334 _worker_id(worker_id),
4327 4335 _young_time(),
4328 4336 _non_young_time(),
4329 4337 _stats(stats) { }
4330 4338
4331 4339 virtual bool do_heap_region(HeapRegion* r) {
4332 4340 assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
4333 4341 JFREventForRegion event(r, _worker_id);
4334 4342 TimerForRegion timer(timer_for_region(r));
4335 4343
4336 4344 _g1h->clear_region_attr(r);
4337 4345 stats()->account_rs_length(r);
4338 4346
4339 4347 if (r->is_young()) {
4340 4348 assert_in_cset(r);
4341 4349 r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
4342 4350 }
4343 4351
4344 4352 if (r->evacuation_failed()) {
4345 4353 handle_failed_region(r);
4346 4354 } else {
4347 4355 handle_evacuated_region(r);
4348 4356 }
4349 4357 assert(!_g1h->is_on_master_free_list(r), "sanity");
4350 4358
4351 4359 return false;
4352 4360 }
4353 4361
4354 4362 void report_timing(Tickspan parallel_time) {
4355 4363 G1GCPhaseTimes* pt = _g1h->phase_times();
4356 4364 pt->record_time_secs(G1GCPhaseTimes::ParFreeCSet, _worker_id, parallel_time.seconds());
4357 4365 if (_young_time.value() > 0) {
4358 4366 pt->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, _worker_id, _young_time.seconds());
4359 4367 }
4360 4368 if (_non_young_time.value() > 0) {
4361 4369 pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
4362 4370 }
4363 4371 }
4364 4372 };
4365 4373
4366 4374 // G1FreeCollectionSetTask members
4367 4375 G1CollectedHeap* _g1h;
4368 4376 G1EvacuationInfo* _evacuation_info;
4369 4377 FreeCSetStats* _worker_stats;
4370 4378 HeapRegionClaimer _claimer;
4371 4379 const size_t* _surviving_young_words;
4372 4380 uint _active_workers;
4373 4381
4374 4382 FreeCSetStats* worker_stats(uint worker) {
4375 4383 return &_worker_stats[worker];
4376 4384 }
4377 4385
4378 4386 void report_statistics() {
4379 4387 // Merge the accounting
4380 4388 FreeCSetStats total_stats;
4381 4389 for (uint worker = 0; worker < _active_workers; worker++) {
4382 4390 total_stats.merge_stats(worker_stats(worker));
4383 4391 }
4384 4392 total_stats.report(_g1h, _evacuation_info);
4385 4393 }
4386 4394
4387 4395 public:
4388 4396 G1FreeCollectionSetTask(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words, uint active_workers) :
4389 4397 AbstractGangTask("G1 Free Collection Set"),
4390 4398 _g1h(G1CollectedHeap::heap()),
4391 4399 _evacuation_info(evacuation_info),
4392 4400 _worker_stats(NEW_C_HEAP_ARRAY(FreeCSetStats, active_workers, mtGC)),
4393 4401 _claimer(active_workers),
4394 4402 _surviving_young_words(surviving_young_words),
4395 4403 _active_workers(active_workers) {
4396 4404 for (uint worker = 0; worker < active_workers; worker++) {
4397 4405 ::new (&_worker_stats[worker]) FreeCSetStats();
4398 4406 }
4399 4407 }
4400 4408
4401 4409 ~G1FreeCollectionSetTask() {
4402 4410 Ticks serial_start = Ticks::now();
4403 4411 report_statistics();
4404 4412 for (uint worker = 0; worker < _active_workers; worker++) {
4405 4413 _worker_stats[worker].~FreeCSetStats();
4406 4414 }
4407 4415 FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);
4408 4416 _g1h->phase_times()->record_serial_free_cset_time_ms((Ticks::now() - serial_start).seconds() * 1000.0);
4409 4417 }
4410 4418
4411 4419 virtual void work(uint worker_id) {
4412 4420 EventGCPhaseParallel event;
4413 4421 Ticks start = Ticks::now();
4414 4422 FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id));
4415 4423 _g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
4416 4424
4417 4425 // Report the total parallel time along with some more detailed metrics.
4418 4426 cl.report_timing(Ticks::now() - start);
4419 4427 event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ParFreeCSet));
4420 4428 }
4421 4429 };
4422 4430
4423 4431 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4424 4432 _eden.clear();
4425 4433
4426 4434 // Freeing the collection set is split into two tasks: the first
4427 4435 // frees the collection set regions and records which regions are free,
4428 4436 // and the second rebuilds the free list. This proved to be more
4429 4437 // efficient than merging one sorted list into another.
4430 4438
4431 4439 Ticks free_cset_start_time = Ticks::now();
4432 4440 {
4433 4441 uint const num_cs_regions = _collection_set.region_length();
4434 4442 uint const num_workers = clamp(num_cs_regions, 1u, workers()->active_workers());
4435 4443 G1FreeCollectionSetTask cl(&evacuation_info, surviving_young_words, num_workers);
4436 4444
4437 4445 log_debug(gc, ergo)("Running %s using %u workers for collection set length %u (%u)",
4438 4446 cl.name(), num_workers, num_cs_regions, num_regions());
4439 4447 workers()->run_task(&cl, num_workers);
4440 4448 }
4441 4449
4442 4450 Ticks free_cset_end_time = Ticks::now();
4443 4451 phase_times()->record_total_free_cset_time_ms((free_cset_end_time - free_cset_start_time).seconds() * 1000.0);
4444 4452
4445 4453 // Now rebuild the free region list.
4446 4454 hrm()->rebuild_free_list(workers());
4447 4455 phase_times()->record_total_rebuild_freelist_time_ms((Ticks::now() - free_cset_end_time).seconds() * 1000.0);
4448 4456
4449 4457 collection_set->clear();
4450 4458 }
4451 4459
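// Frees the regions of eagerly reclaimable dead humongous objects (currently
// only type arrays; see the liveness discussion in do_heap_region() below).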
4452 4460 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4453 4461 private:
4454 4462 FreeRegionList* _free_region_list;
4455 4463 HeapRegionSet* _proxy_set;
4456 4464 uint _humongous_objects_reclaimed;
4457 4465 uint _humongous_regions_reclaimed;
4458 4466 size_t _freed_bytes;
4459 4467 public:
4460 4468
4461 4469 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4462 4470 _free_region_list(free_region_list), _proxy_set(NULL), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4463 4471 }
4464 4472
4465 4473 virtual bool do_heap_region(HeapRegion* r) {
4466 4474 if (!r->is_starts_humongous()) {
4467 4475 return false;
4468 4476 }
4469 4477
4470 4478 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4471 4479
4472 4480 oop obj = (oop)r->bottom();
4473 4481 G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();
4474 4482
4475 4483 // The following checks for whether the humongous object is live are sufficient.
4476 4484 // The main additional check (in addition to having a reference from the roots
4477 4485 // or the young gen) is whether the humongous object has a remembered set entry.
4478 4486 //
4479 4487 // A humongous object cannot be live if there is no remembered set for it
4480 4488 // because:
4481 4489 // - there can be no references from within humongous starts regions referencing
4482 4490 // the object because we never allocate other objects into them.
4483 4491 // (I.e. there are no intra-region references that may be missed by the
4484 4492 // remembered set)
4485 4493 // - as soon as there is a remembered set entry to the humongous starts region
4486 4494 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
4487 4495 // until the end of a concurrent mark.
4488 4496 //
4489 4497 // It is not required to check whether the object has been found dead by marking
4490 4498 // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
4491 4499 // all objects allocated during that time are considered live.
4492 4500 // SATB marking is even more conservative than the remembered set.
4493 4501 // So if at this point in the collection there is no remembered set entry,
4494 4502 // nobody has a reference to it.
4495 4503 // At the start of the collection we flush all refinement logs, so remembered
4496 4504 // sets are completely up-to-date with respect to references to the humongous object.
4497 4505 //
4498 4506 // Other implementation considerations:
4499 4507 // - never consider object arrays at this time because cleaning up their
4500 4508 // remembered sets would require considerable effort. This is
4501 4509 // required because stale remembered sets might reference locations that
4502 4510 // are currently allocated into.
4503 4511 uint region_idx = r->hrm_index();
4504 4512 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
4505 4513 !r->rem_set()->is_empty()) {
4506 4514 log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4507 4515 region_idx,
4508 4516 (size_t)obj->size() * HeapWordSize,
4509 4517 p2i(r->bottom()),
4510 4518 r->rem_set()->occupied(),
4511 4519 r->rem_set()->strong_code_roots_list_length(),
4512 4520 next_bitmap->is_marked(r->bottom()),
4513 4521 g1h->is_humongous_reclaim_candidate(region_idx),
4514 4522 obj->is_typeArray()
4515 4523 );
4516 4524 return false;
4517 4525 }
4518 4526
4519 4527 guarantee(obj->is_typeArray(),
4520 4528 "Only eagerly reclaiming type arrays is supported, but the object "
4521 4529 PTR_FORMAT " is not.", p2i(r->bottom()));
4522 4530
4523 4531 log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4524 4532 region_idx,
4525 4533 (size_t)obj->size() * HeapWordSize,
4526 4534 p2i(r->bottom()),
4527 4535 r->rem_set()->occupied(),
4528 4536 r->rem_set()->strong_code_roots_list_length(),
4529 4537 next_bitmap->is_marked(r->bottom()),
4530 4538 g1h->is_humongous_reclaim_candidate(region_idx),
4531 4539 obj->is_typeArray()
4532 4540 );
4533 4541
4534 4542 G1ConcurrentMark* const cm = g1h->concurrent_mark();
4535 4543 cm->humongous_object_eagerly_reclaimed(r);
4536 4544 assert(!cm->is_marked_in_prev_bitmap(obj) && !cm->is_marked_in_next_bitmap(obj),
4537 4545 "Eagerly reclaimed humongous region %u should not be marked at all but is in prev %s next %s",
4538 4546 region_idx,
4539 4547 BOOL_TO_STR(cm->is_marked_in_prev_bitmap(obj)),
4540 4548 BOOL_TO_STR(cm->is_marked_in_next_bitmap(obj)));
4541 4549 _humongous_objects_reclaimed++;
4542 4550 do {
4543 4551 HeapRegion* next = g1h->next_region_in_humongous(r);
4544 4552 _freed_bytes += r->used();
4545 4553 r->set_containing_set(NULL);
4546 4554 _humongous_regions_reclaimed++;
4547 4555 g1h->free_humongous_region(r, _free_region_list);
4548 4556 r = next;
4549 4557 } while (r != NULL);
4550 4558
4551 4559 return false;
4552 4560 }
4553 4561
4554 4562 uint humongous_objects_reclaimed() {
4555 4563 return _humongous_objects_reclaimed;
4556 4564 }
4557 4565
4558 4566 uint humongous_regions_reclaimed() {
4559 4567 return _humongous_regions_reclaimed;
4560 4568 }
4561 4569
4562 4570 size_t bytes_freed() const {
4563 4571 return _freed_bytes;
4564 4572 }
4565 4573 };
4566 4574
4567 4575 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4568 4576 assert_at_safepoint_on_vm_thread();
4569 4577
4570 4578 if (!G1EagerReclaimHumongousObjects ||
4571 4579 (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
4572 4580 phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
4573 4581 return;
4574 4582 }
4575 4583
4576 4584 double start_time = os::elapsedTime();
4577 4585
4578 4586 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
4579 4587
4580 4588 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
4581 4589 heap_region_iterate(&cl);
4582 4590
4583 4591 remove_from_old_sets(0, cl.humongous_regions_reclaimed());
4584 4592
4585 4593 G1HRPrinter* hrp = hr_printer();
4586 4594 if (hrp->is_active()) {
4587 4595 FreeRegionListIterator iter(&local_cleanup_list);
4588 4596 while (iter.more_available()) {
4589 4597 HeapRegion* hr = iter.get_next();
4590 4598 hrp->cleanup(hr);
4591 4599 }
4592 4600 }
4593 4601
4594 4602 prepend_to_freelist(&local_cleanup_list);
4595 4603 decrement_summary_bytes(cl.bytes_freed());
4596 4604
4597 4605 phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4598 4606 cl.humongous_objects_reclaimed());
4599 4607 }
4600 4608
4601 4609 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
4602 4610 public:
4603 4611 virtual bool do_heap_region(HeapRegion* r) {
4604 4612 assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4605 4613 G1CollectedHeap::heap()->clear_region_attr(r);
4606 4614 r->clear_young_index_in_cset();
4607 4615 return false;
4608 4616 }
4609 4617 };
4610 4618
4611 4619 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4612 4620 G1AbandonCollectionSetClosure cl;
4613 4621 collection_set_iterate_all(&cl);
4614 4622
4615 4623 collection_set->clear();
4616 4624 collection_set->stop_incremental_building();
4617 4625 }
4618 4626
4619 4627 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4620 4628 return _allocator->is_retained_old_region(hr);
4621 4629 }
4622 4630
4623 4631 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4624 4632 _eden.add(hr);
4625 4633 _policy->set_region_eden(hr);
4626 4634 }
4627 4635
4628 4636 #ifdef ASSERT
4629 4637
4630 4638 class NoYoungRegionsClosure: public HeapRegionClosure {
4631 4639 private:
4632 4640 bool _success;
4633 4641 public:
4634 4642 NoYoungRegionsClosure() : _success(true) { }
4635 4643 bool do_heap_region(HeapRegion* r) {
4636 4644 if (r->is_young()) {
4637 4645 log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4638 4646 p2i(r->bottom()), p2i(r->end()));
4639 4647 _success = false;
4640 4648 }
4641 4649 return false;
4642 4650 }
4643 4651 bool success() { return _success; }
4644 4652 };
4645 4653
4646 4654 bool G1CollectedHeap::check_young_list_empty() {
4647 4655 bool ret = (young_regions_count() == 0);
4648 4656
4649 4657 NoYoungRegionsClosure closure;
4650 4658 heap_region_iterate(&closure);
4651 4659 ret = ret && closure.success();
4652 4660
4653 4661 return ret;
4654 4662 }
4655 4663
4656 4664 #endif // ASSERT
4657 4665
4658 4666 class TearDownRegionSetsClosure : public HeapRegionClosure {
4659 4667 HeapRegionSet *_old_set;
4660 4668
4661 4669 public:
4662 4670 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
4663 4671
4664 4672 bool do_heap_region(HeapRegion* r) {
4665 4673 if (r->is_old()) {
4666 4674 _old_set->remove(r);
4667 4675 } else if (r->is_young()) {
4668 4676 r->uninstall_surv_rate_group();
4669 4677 } else {
4670 4678 // We ignore free regions, we'll empty the free list afterwards.
4671 4679 // We ignore humongous and archive regions, we're not tearing down these
4672 4680 // sets.
4673 4681 assert(r->is_archive() || r->is_free() || r->is_humongous(),
4674 4682 "it cannot be another type");
4675 4683 }
4676 4684 return false;
4677 4685 }
4678 4686
4679 4687 ~TearDownRegionSetsClosure() {
4680 4688 assert(_old_set->is_empty(), "post-condition");
4681 4689 }
4682 4690 };
4683 4691
4684 4692 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
4685 4693 assert_at_safepoint_on_vm_thread();
4686 4694
4687 4695 if (!free_list_only) {
4688 4696 TearDownRegionSetsClosure cl(&_old_set);
4689 4697 heap_region_iterate(&cl);
4690 4698
4691 4699 // Note that emptying the _young_list is postponed and instead done as
4692 4700 // the first step when rebuilding the regions sets again. The reason for
4693 4701 // this is that during a full GC string deduplication needs to know if
4694 4702 // a collected region was young or old when the full GC was initiated.
4695 4703 }
4696 4704 _hrm->remove_all_free_regions();
4697 4705 }
4698 4706
4699 4707 void G1CollectedHeap::increase_used(size_t bytes) {
4700 4708 _summary_bytes_used += bytes;
4701 4709 }
4702 4710
4703 4711 void G1CollectedHeap::decrease_used(size_t bytes) {
4704 4712 assert(_summary_bytes_used >= bytes,
4705 4713 "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
4706 4714 _summary_bytes_used, bytes);
4707 4715 _summary_bytes_used -= bytes;
4708 4716 }
4709 4717
4710 4718 void G1CollectedHeap::set_used(size_t bytes) {
4711 4719 _summary_bytes_used = bytes;
4712 4720 }
4713 4721
4714 4722 class RebuildRegionSetsClosure : public HeapRegionClosure {
4715 4723 private:
4716 4724 bool _free_list_only;
4717 4725
4718 4726 HeapRegionSet* _old_set;
4719 4727 HeapRegionManager* _hrm;
4720 4728
4721 4729 size_t _total_used;
4722 4730
4723 4731 public:
4724 4732 RebuildRegionSetsClosure(bool free_list_only,
4725 4733 HeapRegionSet* old_set,
4726 4734 HeapRegionManager* hrm) :
4727 4735 _free_list_only(free_list_only),
4728 4736 _old_set(old_set), _hrm(hrm), _total_used(0) {
4729 4737 assert(_hrm->num_free_regions() == 0, "pre-condition");
4730 4738 if (!free_list_only) {
4731 4739 assert(_old_set->is_empty(), "pre-condition");
4732 4740 }
4733 4741 }
4734 4742
4735 4743 bool do_heap_region(HeapRegion* r) {
4736 4744 if (r->is_empty()) {
4737 4745 assert(r->rem_set()->is_empty(), "Empty regions should have empty remembered sets.");
4738 4746 // Add free regions to the free list
4739 4747 r->set_free();
4740 4748 _hrm->insert_into_free_list(r);
4741 4749 } else if (!_free_list_only) {
4742 4750 assert(r->rem_set()->is_empty(), "At this point remembered sets must have been cleared.");
4743 4751
4744 4752 if (r->is_archive() || r->is_humongous()) {
4745 4753 // We ignore archive and humongous regions. We left these sets unchanged.
4746 4754 } else {
4747 4755 assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
4748 4756 // We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such.
4749 4757 r->move_to_old();
4750 4758 _old_set->add(r);
4751 4759 }
4752 4760 _total_used += r->used();
4753 4761 }
4754 4762
4755 4763 return false;
4756 4764 }
4757 4765
4758 4766 size_t total_used() {
4759 4767 return _total_used;
4760 4768 }
4761 4769 };
4762 4770
4763 4771 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4764 4772 assert_at_safepoint_on_vm_thread();
4765 4773
4766 4774 if (!free_list_only) {
4767 4775 _eden.clear();
4768 4776 _survivor.clear();
4769 4777 }
4770 4778
4771 4779 RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4772 4780 heap_region_iterate(&cl);
4773 4781
4774 4782 if (!free_list_only) {
4775 4783 set_used(cl.total_used());
4776 4784 if (_archive_allocator != NULL) {
4777 4785 _archive_allocator->clear_used();
4778 4786 }
4779 4787 }
4780 4788 assert_used_and_recalculate_used_equal(this);
4781 4789 }
4782 4790
4783 4791 // Methods for the mutator alloc region
4784 4792
4785 4793 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4786 4794 bool force,
4787 4795 uint node_index) {
4788 4796 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4789 4797 bool should_allocate = policy()->should_allocate_mutator_region();
4790 4798 if (force || should_allocate) {
4791 4799 HeapRegion* new_alloc_region = new_region(word_size,
4792 4800 HeapRegionType::Eden,
4793 4801 false /* do_expand */,
4794 4802 node_index);
4795 4803 if (new_alloc_region != NULL) {
4796 4804 set_region_short_lived_locked(new_alloc_region);
4797 4805 _hr_printer.alloc(new_alloc_region, !should_allocate);
4798 4806 _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4799 4807 _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4800 4808 return new_alloc_region;
4801 4809 }
4802 4810 }
4803 4811 return NULL;
4804 4812 }
4805 4813
4806 4814 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4807 4815 size_t allocated_bytes) {
4808 4816 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4809 4817 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4810 4818
4811 4819 collection_set()->add_eden_region(alloc_region);
4812 4820 increase_used(allocated_bytes);
4813 4821 _eden.add_used_bytes(allocated_bytes);
4814 4822 _hr_printer.retire(alloc_region);
4815 4823
4816 4824 // We update the eden sizes here, when the region is retired,
4817 4825 // instead of when it's allocated, since this is the point that its
4818 4826 // used space has been recorded in _summary_bytes_used.
4819 4827 g1mm()->update_eden_size();
4820 4828 }
4821 4829
4822 4830 // Methods for the GC alloc regions
4823 4831
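// There is no limit on the number of old GC alloc regions; the number of
// survivor GC alloc regions is capped by the policy.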
4824 4832 bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
4825 4833 if (dest.is_old()) {
4826 4834 return true;
4827 4835 } else {
4828 4836 return survivor_regions_count() < policy()->max_survivor_regions();
4829 4837 }
4830 4838 }
4831 4839
4832 4840 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
4833 4841 assert(FreeList_lock->owned_by_self(), "pre-condition");
4834 4842
4835 4843 if (!has_more_regions(dest)) {
4836 4844 return NULL;
4837 4845 }
4838 4846
4839 4847 HeapRegionType type;
4840 4848 if (dest.is_young()) {
4841 4849 type = HeapRegionType::Survivor;
4842 4850 } else {
4843 4851 type = HeapRegionType::Old;
4844 4852 }
4845 4853
4846 4854 HeapRegion* new_alloc_region = new_region(word_size,
4847 4855 type,
4848 4856 true /* do_expand */,
4849 4857 node_index);
4850 4858
4851 4859 if (new_alloc_region != NULL) {
4852 4860 if (type.is_survivor()) {
4853 4861 new_alloc_region->set_survivor();
4854 4862 _survivor.add(new_alloc_region);
4855 4863 _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
4856 4864 } else {
4857 4865 new_alloc_region->set_old();
4858 4866 _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
4859 4867 }
4860 4868 _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4861 4869 register_region_with_region_attr(new_alloc_region);
4862 4870 _hr_printer.alloc(new_alloc_region);
4863 4871 return new_alloc_region;
4864 4872 }
4865 4873 return NULL;
4866 4874 }
4867 4875
4868 4876 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4869 4877 size_t allocated_bytes,
4870 4878 G1HeapRegionAttr dest) {
4871 4879 _bytes_used_during_gc += allocated_bytes;
4872 4880 if (dest.is_old()) {
4873 4881 old_set_add(alloc_region);
4874 4882 } else {
4875 4883 assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
4876 4884 _survivor.add_used_bytes(allocated_bytes);
4877 4885 }
4878 4886
4879 4887 bool const during_im = collector_state()->in_initial_mark_gc();
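    // Objects copied into this region during an initial-mark pause serve as
    // marking roots: register the [next_top_at_mark_start, top) interval for
    // concurrent root region scanning.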
4880 4888 if (during_im && allocated_bytes > 0) {
4881 4889 _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
4882 4890 }
4883 4891 _hr_printer.retire(alloc_region);
4884 4892 }
4885 4893
4886 4894 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4887 4895 bool expanded = false;
4888 4896 uint index = _hrm->find_highest_free(&expanded);
4889 4897
4890 4898 if (index != G1_NO_HRM_INDEX) {
4891 4899 if (expanded) {
4892 4900 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4893 4901 HeapRegion::GrainWords * HeapWordSize);
4894 4902 }
4895 4903 _hrm->allocate_free_regions_starting_at(index, 1);
4896 4904 return region_at(index);
4897 4905 }
4898 4906 return NULL;
4899 4907 }
4900 4908
4901 4909 // Optimized nmethod scanning
4902 4910
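// Adds the given nmethod to the strong code roots list of each region that
// contains at least one of the oops embedded in the nmethod.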
4903 4911 class RegisterNMethodOopClosure: public OopClosure {
4904 4912 G1CollectedHeap* _g1h;
4905 4913 nmethod* _nm;
4906 4914
4907 4915 template <class T> void do_oop_work(T* p) {
4908 4916 T heap_oop = RawAccess<>::oop_load(p);
4909 4917 if (!CompressedOops::is_null(heap_oop)) {
4910 4918 oop obj = CompressedOops::decode_not_null(heap_oop);
4911 4919 HeapRegion* hr = _g1h->heap_region_containing(obj);
4912 4920 assert(!hr->is_continues_humongous(),
4913 4921 "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4914 4922 " starting at " HR_FORMAT,
4915 4923 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
4916 4924
4917 4925 // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
4918 4926 hr->add_strong_code_root_locked(_nm);
4919 4927 }
4920 4928 }
4921 4929
4922 4930 public:
4923 4931 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
4924 4932 _g1h(g1h), _nm(nm) {}
4925 4933
4926 4934 void do_oop(oop* p) { do_oop_work(p); }
4927 4935 void do_oop(narrowOop* p) { do_oop_work(p); }
4928 4936 };
4929 4937
4930 4938 class UnregisterNMethodOopClosure: public OopClosure {
4931 4939 G1CollectedHeap* _g1h;
4932 4940 nmethod* _nm;
4933 4941
4934 4942 template <class T> void do_oop_work(T* p) {
4935 4943 T heap_oop = RawAccess<>::oop_load(p);
4936 4944 if (!CompressedOops::is_null(heap_oop)) {
4937 4945 oop obj = CompressedOops::decode_not_null(heap_oop);
4938 4946 HeapRegion* hr = _g1h->heap_region_containing(obj);
4939 4947 assert(!hr->is_continues_humongous(),
4940 4948 "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4941 4949 " starting at " HR_FORMAT,
4942 4950 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
4943 4951
4944 4952 hr->remove_strong_code_root(_nm);
4945 4953 }
4946 4954 }
4947 4955
4948 4956 public:
4949 4957 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
4950 4958 _g1h(g1h), _nm(nm) {}
4951 4959
4952 4960 void do_oop(oop* p) { do_oop_work(p); }
4953 4961 void do_oop(narrowOop* p) { do_oop_work(p); }
4954 4962 };
4955 4963
4956 4964 void G1CollectedHeap::register_nmethod(nmethod* nm) {
4957 4965 guarantee(nm != NULL, "sanity");
4958 4966 RegisterNMethodOopClosure reg_cl(this, nm);
4959 4967 nm->oops_do(&reg_cl);
4960 4968 }
4961 4969
4962 4970 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
4963 4971 guarantee(nm != NULL, "sanity");
4964 4972 UnregisterNMethodOopClosure reg_cl(this, nm);
4965 4973 nm->oops_do(&reg_cl, true);
4966 4974 }
4967 4975
4968 4976 void G1CollectedHeap::purge_code_root_memory() {
4969 4977 double purge_start = os::elapsedTime();
4970 4978 G1CodeRootSet::purge();
4971 4979 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
4972 4980 phase_times()->record_strong_code_root_purge_time(purge_time_ms);
4973 4981 }
4974 4982
4975 4983 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
4976 4984 G1CollectedHeap* _g1h;
4977 4985
4978 4986 public:
4979 4987 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
4980 4988 _g1h(g1h) {}
4981 4989
4982 4990 void do_code_blob(CodeBlob* cb) {
4983 4991 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
4984 4992 if (nm == NULL) {
4985 4993 return;
4986 4994 }
4987 4995
4988 4996 _g1h->register_nmethod(nm);
4989 4997 }
4990 4998 };
4991 4999
4992 5000 void G1CollectedHeap::rebuild_strong_code_roots() {
4993 5001 RebuildStrongCodeRootClosure blob_cl(this);
4994 5002 CodeCache::blobs_do(&blob_cl);
4995 5003 }
4996 5004
4997 5005 void G1CollectedHeap::initialize_serviceability() {
4998 5006 _g1mm->initialize_serviceability();
4999 5007 }
5000 5008
5001 5009 MemoryUsage G1CollectedHeap::memory_usage() {
5002 5010 return _g1mm->memory_usage();
5003 5011 }
5004 5012
5005 5013 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
5006 5014 return _g1mm->memory_managers();
5007 5015 }
5008 5016
5009 5017 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5010 5018 return _g1mm->memory_pools();
5011 5019 }
(1862 lines elided)