rev 7125 : 7176220: 'Full GC' events miss date stamp information occasionally
Summary: Move date stamp logic into GCTraceTime
Reviewed-by: brutisso, tschatzl
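The change below removes the explicit gclog_or_tty->date_stamp(...) call that preceded the tracers in do_collection() (old line 387), because the date stamp is now emitted by GCTraceTime itself. The following is a minimal, self-contained C++ sketch of that idea only, not the actual HotSpot sources; GCTraceTimeSketch and print_date_stamp() are illustrative stand-ins for the real code in gcTraceTime.hpp/cpp.

    #include <cstdio>
    #include <ctime>

    // Illustrative stand-in for gclog_or_tty->date_stamp(); the real code
    // honors PrintGCDateStamps and uses the VM's own time sources.
    static void print_date_stamp() {
      char buf[32];
      time_t now = time(NULL);
      strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S%z", localtime(&now));
      printf("%s: ", buf);
    }

    // Scoped timer in the spirit of GCTraceTime: the constructor prints the
    // date stamp itself, so every traced GC event gets one, instead of each
    // call site having to remember to print it first (the bug fixed here).
    class GCTraceTimeSketch {
      const char* _title;
      bool        _doit;
      clock_t     _start;
     public:
      GCTraceTimeSketch(const char* title, bool doit)
          : _title(title), _doit(doit), _start(clock()) {
        if (_doit) {
          print_date_stamp();           // stamping now happens here, once
          printf("[%s", _title);
        }
      }
      ~GCTraceTimeSketch() {
        if (_doit) {
          double secs = double(clock() - _start) / CLOCKS_PER_SEC;
          printf(", %.7f secs]\n", secs);
        }
      }
    };

    int main() {
      GCTraceTimeSketch t("Full GC (System.gc())", true);
      return 0;  // destructor closes the bracket with the elapsed time
    }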
--- old/hotspot/src/share/vm/memory/genCollectedHeap.cpp
+++ new/hotspot/src/share/vm/memory/genCollectedHeap.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "gc_implementation/shared/collectorCounters.hpp"
31 31 #include "gc_implementation/shared/gcTrace.hpp"
32 32 #include "gc_implementation/shared/gcTraceTime.hpp"
33 33 #include "gc_implementation/shared/vmGCOperations.hpp"
34 34 #include "gc_interface/collectedHeap.inline.hpp"
35 35 #include "memory/filemap.hpp"
36 36 #include "memory/gcLocker.inline.hpp"
37 37 #include "memory/genCollectedHeap.hpp"
38 38 #include "memory/genOopClosures.inline.hpp"
39 39 #include "memory/generation.inline.hpp"
40 40 #include "memory/generationSpec.hpp"
41 41 #include "memory/resourceArea.hpp"
42 42 #include "memory/sharedHeap.hpp"
43 43 #include "memory/space.hpp"
44 44 #include "oops/oop.inline.hpp"
45 45 #include "oops/oop.inline2.hpp"
46 46 #include "runtime/biasedLocking.hpp"
47 47 #include "runtime/fprofiler.hpp"
48 48 #include "runtime/handles.hpp"
49 49 #include "runtime/handles.inline.hpp"
50 50 #include "runtime/java.hpp"
51 51 #include "runtime/vmThread.hpp"
52 52 #include "services/memoryService.hpp"
53 53 #include "utilities/vmError.hpp"
54 54 #include "utilities/workgroup.hpp"
55 55 #include "utilities/macros.hpp"
56 56 #if INCLUDE_ALL_GCS
57 57 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
58 58 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
59 59 #endif // INCLUDE_ALL_GCS
60 60
61 61 GenCollectedHeap* GenCollectedHeap::_gch;
62 62 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
63 63
64 64 // The set of potentially parallel tasks in root scanning.
65 65 enum GCH_strong_roots_tasks {
66 66 // We probably want to parallelize both of these internally, but for now...
67 67 GCH_PS_younger_gens,
68 68 // Leave this one last.
69 69 GCH_PS_NumElements
70 70 };
71 71
72 72 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
73 73 SharedHeap(policy),
74 74 _gen_policy(policy),
75 75 _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
76 76 _full_collections_completed(0)
77 77 {
78 78 if (_gen_process_roots_tasks == NULL ||
79 79 !_gen_process_roots_tasks->valid()) {
80 80 vm_exit_during_initialization("Failed necessary allocation.");
81 81 }
82 82 assert(policy != NULL, "Sanity check");
83 83 }
84 84
85 85 jint GenCollectedHeap::initialize() {
86 86 CollectedHeap::pre_initialize();
87 87
88 88 int i;
89 89 _n_gens = gen_policy()->number_of_generations();
90 90
91 91 // While there are no constraints in the GC code that HeapWordSize
92 92 // be any particular value, there are multiple other areas in the
93 93 // system which believe this to be true (e.g. oop->object_size in some
94 94 // cases incorrectly returns the size in wordSize units rather than
95 95 // HeapWordSize).
96 96 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
97 97
98 98 // The heap must be at least as aligned as generations.
99 99 size_t gen_alignment = Generation::GenGrain;
100 100
101 101 _gen_specs = gen_policy()->generations();
102 102
103 103 // Make sure the sizes are all aligned.
104 104 for (i = 0; i < _n_gens; i++) {
105 105 _gen_specs[i]->align(gen_alignment);
106 106 }
107 107
108 108 // Allocate space for the heap.
109 109
110 110 char* heap_address;
111 111 size_t total_reserved = 0;
112 112 int n_covered_regions = 0;
113 113 ReservedSpace heap_rs;
114 114
115 115 size_t heap_alignment = collector_policy()->heap_alignment();
116 116
117 117 heap_address = allocate(heap_alignment, &total_reserved,
118 118 &n_covered_regions, &heap_rs);
119 119
120 120 if (!heap_rs.is_reserved()) {
121 121 vm_shutdown_during_initialization(
122 122 "Could not reserve enough space for object heap");
123 123 return JNI_ENOMEM;
124 124 }
125 125
126 126 _reserved = MemRegion((HeapWord*)heap_rs.base(),
127 127 (HeapWord*)(heap_rs.base() + heap_rs.size()));
128 128
129 129 // It is important to do this in a way such that concurrent readers can't
130 130 // temporarily think something is in the heap. (Seen this happen in asserts.)
131 131 _reserved.set_word_size(0);
132 132 _reserved.set_start((HeapWord*)heap_rs.base());
133 133 size_t actual_heap_size = heap_rs.size();
134 134 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
135 135
136 136 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
137 137 set_barrier_set(rem_set()->bs());
138 138
139 139 _gch = this;
140 140
141 141 for (i = 0; i < _n_gens; i++) {
142 142 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
143 143 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
144 144 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
145 145 }
146 146 clear_incremental_collection_failed();
147 147
148 148 #if INCLUDE_ALL_GCS
149 149 // If we are running CMS, create the collector responsible
150 150 // for collecting the CMS generations.
151 151 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
152 152 bool success = create_cms_collector();
153 153 if (!success) return JNI_ENOMEM;
154 154 }
155 155 #endif // INCLUDE_ALL_GCS
156 156
157 157 return JNI_OK;
158 158 }
159 159
160 160
161 161 char* GenCollectedHeap::allocate(size_t alignment,
162 162 size_t* _total_reserved,
163 163 int* _n_covered_regions,
164 164 ReservedSpace* heap_rs){
165 165 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
166 166 "the maximum representable size";
167 167
168 168 // Now figure out the total size.
169 169 size_t total_reserved = 0;
170 170 int n_covered_regions = 0;
171 171 const size_t pageSize = UseLargePages ?
172 172 os::large_page_size() : os::vm_page_size();
173 173
174 174 assert(alignment % pageSize == 0, "Must be");
175 175
176 176 for (int i = 0; i < _n_gens; i++) {
177 177 total_reserved += _gen_specs[i]->max_size();
178 178 if (total_reserved < _gen_specs[i]->max_size()) {
179 179 vm_exit_during_initialization(overflow_msg);
180 180 }
181 181 n_covered_regions += _gen_specs[i]->n_covered_regions();
182 182 }
183 183 assert(total_reserved % alignment == 0,
184 184 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
185 185 SIZE_FORMAT, total_reserved, alignment));
186 186
187 187 // Needed until the cardtable is fixed to have the right number
188 188 // of covered regions.
189 189 n_covered_regions += 2;
190 190
191 191 *_total_reserved = total_reserved;
192 192 *_n_covered_regions = n_covered_regions;
193 193
194 194 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
195 195 return heap_rs->base();
196 196 }
197 197
198 198
199 199 void GenCollectedHeap::post_initialize() {
200 200 SharedHeap::post_initialize();
201 201 TwoGenerationCollectorPolicy *policy =
202 202 (TwoGenerationCollectorPolicy *)collector_policy();
203 203 guarantee(policy->is_two_generation_policy(), "Illegal policy type");
204 204 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
205 205 assert(def_new_gen->kind() == Generation::DefNew ||
206 206 def_new_gen->kind() == Generation::ParNew ||
207 207 def_new_gen->kind() == Generation::ASParNew,
208 208 "Wrong generation kind");
209 209
210 210 Generation* old_gen = get_gen(1);
211 211 assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
212 212 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
213 213 old_gen->kind() == Generation::MarkSweepCompact,
214 214 "Wrong generation kind");
215 215
216 216 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
217 217 old_gen->capacity(),
218 218 def_new_gen->from()->capacity());
219 219 policy->initialize_gc_policy_counters();
220 220 }
221 221
222 222 void GenCollectedHeap::ref_processing_init() {
223 223 SharedHeap::ref_processing_init();
224 224 for (int i = 0; i < _n_gens; i++) {
225 225 _gens[i]->ref_processor_init();
226 226 }
227 227 }
228 228
229 229 size_t GenCollectedHeap::capacity() const {
230 230 size_t res = 0;
231 231 for (int i = 0; i < _n_gens; i++) {
232 232 res += _gens[i]->capacity();
233 233 }
234 234 return res;
235 235 }
236 236
237 237 size_t GenCollectedHeap::used() const {
238 238 size_t res = 0;
239 239 for (int i = 0; i < _n_gens; i++) {
240 240 res += _gens[i]->used();
241 241 }
242 242 return res;
243 243 }
244 244
245 245 // Save the "used_region" for generations at "level" and lower.
246 246 void GenCollectedHeap::save_used_regions(int level) {
247 247 assert(level < _n_gens, "Illegal level parameter");
248 248 for (int i = level; i >= 0; i--) {
249 249 _gens[i]->save_used_region();
250 250 }
251 251 }
252 252
253 253 size_t GenCollectedHeap::max_capacity() const {
254 254 size_t res = 0;
255 255 for (int i = 0; i < _n_gens; i++) {
256 256 res += _gens[i]->max_capacity();
257 257 }
258 258 return res;
259 259 }
260 260
261 261 // Update the _full_collections_completed counter
262 262 // at the end of a stop-world full GC.
263 263 unsigned int GenCollectedHeap::update_full_collections_completed() {
264 264 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
265 265 assert(_full_collections_completed <= _total_full_collections,
266 266 "Can't complete more collections than were started");
267 267 _full_collections_completed = _total_full_collections;
268 268 ml.notify_all();
269 269 return _full_collections_completed;
270 270 }
271 271
272 272 // Update the _full_collections_completed counter, as appropriate,
273 273 // at the end of a concurrent GC cycle. Note the conditional update
274 274 // below to allow this method to be called by a concurrent collector
275 275 // without synchronizing in any manner with the VM thread (which
276 276 // may already have initiated a STW full collection "concurrently").
277 277 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
278 278 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
279 279 assert((_full_collections_completed <= _total_full_collections) &&
280 280 (count <= _total_full_collections),
281 281 "Can't complete more collections than were started");
282 282 if (count > _full_collections_completed) {
283 283 _full_collections_completed = count;
284 284 ml.notify_all();
285 285 }
286 286 return _full_collections_completed;
287 287 }
288 288
289 289
290 290 #ifndef PRODUCT
291 291 // Override of memory state checking method in CollectedHeap:
292 292 // Some collectors (CMS for example) can't have badHeapWordVal written
293 293 // in the first two words of an object. (For instance, in the case of
294 294 // CMS these words hold state used to synchronize between certain
295 295 // (concurrent) GC steps and direct allocating mutators.)
296 296 // The skip_header_HeapWords() method below allows us to skip
297 297 // over the requisite number of HeapWords. Note that (for
298 298 // generational collectors) this means that the same number of words is
299 299 // skipped in each object, irrespective of the generation in which
300 300 // that object lives. The resultant loss of precision seems to be
301 301 // harmless and the pain of avoiding that imprecision appears somewhat
302 302 // higher than we are prepared to pay for such rudimentary debugging
303 303 // support.
304 304 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
305 305 size_t size) {
306 306 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
307 307 // We are asked to check a size in HeapWords,
308 308 // but the memory is mangled in juint words.
309 309 juint* start = (juint*) (addr + skip_header_HeapWords());
310 310 juint* end = (juint*) (addr + size);
311 311 for (juint* slot = start; slot < end; slot += 1) {
312 312 assert(*slot == badHeapWordVal,
313 313 "Found non badHeapWordValue in pre-allocation check");
314 314 }
315 315 }
316 316 }
317 317 #endif
318 318
319 319 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
320 320 bool is_tlab,
321 321 bool first_only) {
322 322 HeapWord* res;
323 323 for (int i = 0; i < _n_gens; i++) {
324 324 if (_gens[i]->should_allocate(size, is_tlab)) {
325 325 res = _gens[i]->allocate(size, is_tlab);
326 326 if (res != NULL) return res;
327 327 else if (first_only) break;
328 328 }
329 329 }
330 330 // Otherwise...
331 331 return NULL;
332 332 }
333 333
334 334 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
335 335 bool* gc_overhead_limit_was_exceeded) {
336 336 return collector_policy()->mem_allocate_work(size,
337 337 false /* is_tlab */,
338 338 gc_overhead_limit_was_exceeded);
339 339 }
340 340
341 341 bool GenCollectedHeap::must_clear_all_soft_refs() {
342 342 return _gc_cause == GCCause::_last_ditch_collection;
343 343 }
344 344
345 345 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
346 346 return UseConcMarkSweepGC &&
347 347 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
348 348 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
349 349 }
350 350
351 351 void GenCollectedHeap::do_collection(bool full,
352 352 bool clear_all_soft_refs,
353 353 size_t size,
354 354 bool is_tlab,
355 355 int max_level) {
356 356 bool prepared_for_verification = false;
357 357 ResourceMark rm;
358 358 DEBUG_ONLY(Thread* my_thread = Thread::current();)
359 359
360 360 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
361 361 assert(my_thread->is_VM_thread() ||
362 362 my_thread->is_ConcurrentGC_thread(),
363 363 "incorrect thread type capability");
364 364 assert(Heap_lock->is_locked(),
365 365 "the requesting thread should have the Heap_lock");
366 366 guarantee(!is_gc_active(), "collection is not reentrant");
367 367 assert(max_level < n_gens(), "sanity check");
368 368
369 369 if (GC_locker::check_active_before_gc()) {
370 370 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
371 371 }
372 372
373 373 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
374 374 collector_policy()->should_clear_all_soft_refs();
375 375
376 376 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
377 377
378 378 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
379 379
380 380 print_heap_before_gc();
381 381
382 382 {
383 383 FlagSetting fl(_is_gc_active, true);
384 384
385 385 bool complete = full && (max_level == (n_gens()-1));
386 386 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
387 - gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
388 387 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
389 388 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
390 389 // so we can assume here that the next GC id is what we want.
391 390 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
392 391
393 392 gc_prologue(complete);
394 393 increment_total_collections(complete);
395 394
396 395 size_t gch_prev_used = used();
397 396
398 397 int starting_level = 0;
399 398 if (full) {
400 399 // Search for the oldest generation which will collect all younger
401 400 // generations, and start collection loop there.
402 401 for (int i = max_level; i >= 0; i--) {
403 402 if (_gens[i]->full_collects_younger_generations()) {
404 403 starting_level = i;
405 404 break;
406 405 }
407 406 }
408 407 }
409 408
410 409 bool must_restore_marks_for_biased_locking = false;
411 410
412 411 int max_level_collected = starting_level;
413 412 for (int i = starting_level; i <= max_level; i++) {
414 413 if (_gens[i]->should_collect(full, size, is_tlab)) {
415 414 if (i == n_gens() - 1) { // a major collection is to happen
416 415 if (!complete) {
417 416 // The full_collections increment was missed above.
418 417 increment_total_full_collections();
419 418 }
420 419 pre_full_gc_dump(NULL); // do any pre full gc dumps
421 420 }
422 421 // Timer for individual generations. Last argument is false: no CR
423 422 // FIXME: We should try to start the timing earlier to cover more of the GC pause
424 423 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
425 424 // so we can assume here that the next GC id is what we want.
426 425 GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
427 426 TraceCollectorStats tcs(_gens[i]->counters());
428 427 TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
429 428
430 429 size_t prev_used = _gens[i]->used();
431 430 _gens[i]->stat_record()->invocations++;
432 431 _gens[i]->stat_record()->accumulated_time.start();
433 432
434 433 // Must be done anew before each collection because
435 434 // a previous collection will do mangling and will
436 435 // change top of some spaces.
437 436 record_gen_tops_before_GC();
438 437
439 438 if (PrintGC && Verbose) {
440 439 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
441 440 i,
442 441 _gens[i]->stat_record()->invocations,
443 442 size*HeapWordSize);
444 443 }
445 444
446 445 if (VerifyBeforeGC && i >= VerifyGCLevel &&
447 446 total_collections() >= VerifyGCStartAt) {
448 447 HandleMark hm; // Discard invalid handles created during verification
449 448 if (!prepared_for_verification) {
450 449 prepare_for_verify();
451 450 prepared_for_verification = true;
452 451 }
453 452 Universe::verify(" VerifyBeforeGC:");
454 453 }
455 454 COMPILER2_PRESENT(DerivedPointerTable::clear());
456 455
457 456 if (!must_restore_marks_for_biased_locking &&
458 457 _gens[i]->performs_in_place_marking()) {
459 458 // We perform this mark word preservation work lazily
460 459 // because it's only at this point that we know whether we
461 460 // absolutely have to do it; we want to avoid doing it for
462 461 // scavenge-only collections where it's unnecessary
463 462 must_restore_marks_for_biased_locking = true;
464 463 BiasedLocking::preserve_marks();
465 464 }
466 465
467 466 // Do collection work
468 467 {
469 468 // Note on ref discovery: For what appear to be historical reasons,
470 469 // GCH enables and disables (by enqueueing) refs discovery.
471 470 // In the future this should be moved into the generation's
472 471 // collect method so that ref discovery and enqueueing concerns
473 472 // are local to a generation. The collect method could return
474 473 // an appropriate indication in the case that notification on
475 474 // the ref lock was needed. This will make the treatment of
476 475 // weak refs more uniform (and indeed remove such concerns
477 476 // from GCH). XXX
478 477
479 478 HandleMark hm; // Discard invalid handles created during gc
480 479 save_marks(); // save marks for all gens
481 480 // We want to discover references, but not process them yet.
482 481 // This mode is disabled in process_discovered_references if the
483 482 // generation does some collection work, or in
484 483 // enqueue_discovered_references if the generation returns
485 484 // without doing any work.
486 485 ReferenceProcessor* rp = _gens[i]->ref_processor();
487 486 // If the discovery of ("weak") refs in this generation is
488 487 // atomic wrt other collectors in this configuration, we
489 488 // are guaranteed to have empty discovered ref lists.
490 489 if (rp->discovery_is_atomic()) {
491 490 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
492 491 rp->setup_policy(do_clear_all_soft_refs);
493 492 } else {
494 493 // collect() below will enable discovery as appropriate
495 494 }
496 495 _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
497 496 if (!rp->enqueuing_is_done()) {
498 497 rp->enqueue_discovered_references();
499 498 } else {
500 499 rp->set_enqueuing_is_done(false);
501 500 }
502 501 rp->verify_no_references_recorded();
503 502 }
504 503 max_level_collected = i;
505 504
506 505 // Determine if allocation request was met.
507 506 if (size > 0) {
508 507 if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
509 508 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
510 509 size = 0;
511 510 }
512 511 }
513 512 }
514 513
515 514 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
516 515
517 516 _gens[i]->stat_record()->accumulated_time.stop();
518 517
519 518 update_gc_stats(i, full);
520 519
521 520 if (VerifyAfterGC && i >= VerifyGCLevel &&
522 521 total_collections() >= VerifyGCStartAt) {
523 522 HandleMark hm; // Discard invalid handles created during verification
524 523 Universe::verify(" VerifyAfterGC:");
525 524 }
526 525
527 526 if (PrintGCDetails) {
528 527 gclog_or_tty->print(":");
529 528 _gens[i]->print_heap_change(prev_used);
530 529 }
531 530 }
532 531 }
533 532
534 533 // Update "complete" boolean wrt what actually transpired --
535 534 // for instance, a promotion failure could have led to
536 535 // a whole heap collection.
537 536 complete = complete || (max_level_collected == n_gens() - 1);
538 537
539 538 if (complete) { // We did a "major" collection
540 539 // FIXME: See comment at pre_full_gc_dump call
541 540 post_full_gc_dump(NULL); // do any post full gc dumps
542 541 }
543 542
544 543 if (PrintGCDetails) {
545 544 print_heap_change(gch_prev_used);
546 545
547 546 // Print metaspace info for full GC with PrintGCDetails flag.
548 547 if (complete) {
549 548 MetaspaceAux::print_metaspace_change(metadata_prev_used);
550 549 }
551 550 }
552 551
553 552 for (int j = max_level_collected; j >= 0; j -= 1) {
554 553 // Adjust generation sizes.
555 554 _gens[j]->compute_new_size();
556 555 }
557 556
558 557 if (complete) {
559 558 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
560 559 ClassLoaderDataGraph::purge();
561 560 MetaspaceAux::verify_metrics();
562 561 // Resize the metaspace capacity after full collections
563 562 MetaspaceGC::compute_new_size();
564 563 update_full_collections_completed();
565 564 }
566 565
567 566 // Track memory usage and detect low memory after GC finishes
568 567 MemoryService::track_memory_usage();
569 568
570 569 gc_epilogue(complete);
571 570
572 571 if (must_restore_marks_for_biased_locking) {
573 572 BiasedLocking::restore_marks();
574 573 }
575 574 }
576 575
577 576 AdaptiveSizePolicy* sp = gen_policy()->size_policy();
578 577 AdaptiveSizePolicyOutput(sp, total_collections());
579 578
580 579 print_heap_after_gc();
581 580
582 581 #ifdef TRACESPINNING
583 582 ParallelTaskTerminator::print_termination_counts();
584 583 #endif
585 584 }
586 585
587 586 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
588 587 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
589 588 }
590 589
591 590 void GenCollectedHeap::set_par_threads(uint t) {
592 591 SharedHeap::set_par_threads(t);
593 592 _gen_process_roots_tasks->set_n_threads(t);
594 593 }
595 594
596 595 void GenCollectedHeap::
597 596 gen_process_roots(int level,
598 597 bool younger_gens_as_roots,
599 598 bool activate_scope,
600 599 SharedHeap::ScanningOption so,
601 600 OopsInGenClosure* not_older_gens,
602 601 OopsInGenClosure* weak_roots,
603 602 OopsInGenClosure* older_gens,
604 603 CLDClosure* cld_closure,
605 604 CLDClosure* weak_cld_closure,
606 605 CodeBlobClosure* code_closure) {
607 606
608 607 // General roots.
609 608 SharedHeap::process_roots(activate_scope, so,
610 609 not_older_gens, weak_roots,
611 610 cld_closure, weak_cld_closure,
612 611 code_closure);
613 612
614 613 if (younger_gens_as_roots) {
615 614 if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
616 615 for (int i = 0; i < level; i++) {
617 616 not_older_gens->set_generation(_gens[i]);
618 617 _gens[i]->oop_iterate(not_older_gens);
619 618 }
620 619 not_older_gens->reset_generation();
621 620 }
622 621 }
623 622 // When collection is parallel, all threads get to cooperate to do
624 623 // older-gen scanning.
625 624 for (int i = level+1; i < _n_gens; i++) {
626 625 older_gens->set_generation(_gens[i]);
627 626 rem_set()->younger_refs_iterate(_gens[i], older_gens);
628 627 older_gens->reset_generation();
629 628 }
630 629
631 630 _gen_process_roots_tasks->all_tasks_completed();
632 631 }
633 632
634 633 void GenCollectedHeap::
635 634 gen_process_roots(int level,
636 635 bool younger_gens_as_roots,
637 636 bool activate_scope,
638 637 SharedHeap::ScanningOption so,
639 638 bool only_strong_roots,
640 639 OopsInGenClosure* not_older_gens,
641 640 OopsInGenClosure* older_gens,
642 641 CLDClosure* cld_closure) {
643 642
644 643 const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
645 644
646 645 bool is_moving_collection = false;
647 646 if (level == 0 || is_adjust_phase) {
648 647 // young collections are always moving
649 648 is_moving_collection = true;
650 649 }
651 650
652 651 MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
653 652 CodeBlobClosure* code_closure = &mark_code_closure;
654 653
655 654 gen_process_roots(level,
656 655 younger_gens_as_roots,
657 656 activate_scope, so,
658 657 not_older_gens, only_strong_roots ? NULL : not_older_gens,
659 658 older_gens,
660 659 cld_closure, only_strong_roots ? NULL : cld_closure,
661 660 code_closure);
662 661
663 662 }
664 663
665 664 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
666 665 SharedHeap::process_weak_roots(root_closure);
667 666 // "Local" "weak" refs
668 667 for (int i = 0; i < _n_gens; i++) {
669 668 _gens[i]->ref_processor()->weak_oops_do(root_closure);
670 669 }
671 670 }
672 671
673 672 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
674 673 void GenCollectedHeap:: \
675 674 oop_since_save_marks_iterate(int level, \
676 675 OopClosureType* cur, \
677 676 OopClosureType* older) { \
678 677 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
679 678 for (int i = level+1; i < n_gens(); i++) { \
680 679 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
681 680 } \
682 681 }
683 682
684 683 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
685 684
686 685 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
687 686
688 687 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
689 688 for (int i = level; i < _n_gens; i++) {
690 689 if (!_gens[i]->no_allocs_since_save_marks()) return false;
691 690 }
692 691 return true;
693 692 }
694 693
695 694 bool GenCollectedHeap::supports_inline_contig_alloc() const {
696 695 return _gens[0]->supports_inline_contig_alloc();
697 696 }
698 697
699 698 HeapWord** GenCollectedHeap::top_addr() const {
700 699 return _gens[0]->top_addr();
701 700 }
702 701
703 702 HeapWord** GenCollectedHeap::end_addr() const {
704 703 return _gens[0]->end_addr();
705 704 }
706 705
707 706 // public collection interfaces
708 707
709 708 void GenCollectedHeap::collect(GCCause::Cause cause) {
710 709 if (should_do_concurrent_full_gc(cause)) {
711 710 #if INCLUDE_ALL_GCS
712 711 // mostly concurrent full collection
713 712 collect_mostly_concurrent(cause);
714 713 #else // INCLUDE_ALL_GCS
715 714 ShouldNotReachHere();
716 715 #endif // INCLUDE_ALL_GCS
717 716 } else if (cause == GCCause::_wb_young_gc) {
718 717 // minor collection for WhiteBox API
719 718 collect(cause, 0);
720 719 } else {
721 720 #ifdef ASSERT
722 721 if (cause == GCCause::_scavenge_alot) {
723 722 // minor collection only
724 723 collect(cause, 0);
725 724 } else {
726 725 // Stop-the-world full collection
727 726 collect(cause, n_gens() - 1);
728 727 }
729 728 #else
730 729 // Stop-the-world full collection
731 730 collect(cause, n_gens() - 1);
732 731 #endif
733 732 }
734 733 }
735 734
736 735 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
737 736 // The caller doesn't have the Heap_lock
738 737 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
739 738 MutexLocker ml(Heap_lock);
740 739 collect_locked(cause, max_level);
741 740 }
742 741
743 742 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
744 743 // The caller has the Heap_lock
745 744 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
746 745 collect_locked(cause, n_gens() - 1);
747 746 }
748 747
749 748 // this is the private collection interface
750 749 // The Heap_lock is expected to be held on entry.
751 750
752 751 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
753 752 // Read the GC count while holding the Heap_lock
754 753 unsigned int gc_count_before = total_collections();
755 754 unsigned int full_gc_count_before = total_full_collections();
756 755 {
757 756 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
758 757 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
759 758 cause, max_level);
760 759 VMThread::execute(&op);
761 760 }
762 761 }
763 762
764 763 #if INCLUDE_ALL_GCS
765 764 bool GenCollectedHeap::create_cms_collector() {
766 765
767 766 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
768 767 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
769 768 "Unexpected generation kinds");
770 769 // Skip two header words in the block content verification
771 770 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
772 771 CMSCollector* collector = new CMSCollector(
773 772 (ConcurrentMarkSweepGeneration*)_gens[1],
774 773 _rem_set->as_CardTableRS(),
775 774 (ConcurrentMarkSweepPolicy*) collector_policy());
776 775
777 776 if (collector == NULL || !collector->completed_initialization()) {
778 777 if (collector) {
779 778 delete collector; // Be nice in embedded situation
780 779 }
781 780 vm_shutdown_during_initialization("Could not create CMS collector");
782 781 return false;
783 782 }
784 783 return true; // success
785 784 }
786 785
787 786 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
788 787 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
789 788
790 789 MutexLocker ml(Heap_lock);
791 790 // Read the GC counts while holding the Heap_lock
792 791 unsigned int full_gc_count_before = total_full_collections();
793 792 unsigned int gc_count_before = total_collections();
794 793 {
795 794 MutexUnlocker mu(Heap_lock);
796 795 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
797 796 VMThread::execute(&op);
798 797 }
799 798 }
800 799 #endif // INCLUDE_ALL_GCS
801 800
802 801 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
803 802 do_full_collection(clear_all_soft_refs, _n_gens - 1);
804 803 }
805 804
806 805 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
807 806 int max_level) {
808 807 int local_max_level;
809 808 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
810 809 gc_cause() == GCCause::_gc_locker) {
811 810 local_max_level = 0;
812 811 } else {
813 812 local_max_level = max_level;
814 813 }
815 814
816 815 do_collection(true /* full */,
817 816 clear_all_soft_refs /* clear_all_soft_refs */,
818 817 0 /* size */,
819 818 false /* is_tlab */,
820 819 local_max_level /* max_level */);
821 820 // Hack XXX FIX ME !!!
822 821 // A scavenge may not have been attempted, or may have
823 822 // been attempted and failed, because the old gen was too full
824 823 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
825 824 incremental_collection_will_fail(false /* don't consult_young */)) {
826 825 if (PrintGCDetails) {
827 826 gclog_or_tty->print_cr("GC locker: Trying a full collection "
828 827 "because scavenge failed");
829 828 }
830 829 // This time allow the old gen to be collected as well
831 830 do_collection(true /* full */,
832 831 clear_all_soft_refs /* clear_all_soft_refs */,
833 832 0 /* size */,
834 833 false /* is_tlab */,
835 834 n_gens() - 1 /* max_level */);
836 835 }
837 836 }
838 837
839 838 bool GenCollectedHeap::is_in_young(oop p) {
840 839 bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
841 840 assert(result == _gens[0]->is_in_reserved(p),
842 841 err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
843 842 return result;
844 843 }
845 844
846 845 // Returns "TRUE" iff "p" points into the committed areas of the heap.
847 846 bool GenCollectedHeap::is_in(const void* p) const {
848 847 #ifndef ASSERT
849 848 guarantee(VerifyBeforeGC ||
850 849 VerifyDuringGC ||
851 850 VerifyBeforeExit ||
852 851 VerifyDuringStartup ||
853 852 PrintAssembly ||
854 853 tty->count() != 0 || // already printing
855 854 VerifyAfterGC ||
856 855 VMError::fatal_error_in_progress(), "too expensive");
857 856
858 857 #endif
859 858 // This might be sped up with a cache of the last generation that
860 859 // answered yes.
861 860 for (int i = 0; i < _n_gens; i++) {
862 861 if (_gens[i]->is_in(p)) return true;
863 862 }
864 863 // Otherwise...
865 864 return false;
866 865 }
867 866
868 867 #ifdef ASSERT
869 868 // Don't implement this by using is_in_young(). This method is used
870 869 // in some cases to check that is_in_young() is correct.
871 870 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
872 871 assert(is_in_reserved(p) || p == NULL,
873 872 "Does not work if address is non-null and outside of the heap");
874 873 return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
875 874 }
876 875 #endif
877 876
878 877 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
879 878 for (int i = 0; i < _n_gens; i++) {
880 879 _gens[i]->oop_iterate(cl);
881 880 }
882 881 }
883 882
884 883 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
885 884 for (int i = 0; i < _n_gens; i++) {
886 885 _gens[i]->object_iterate(cl);
887 886 }
888 887 }
889 888
890 889 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
891 890 for (int i = 0; i < _n_gens; i++) {
892 891 _gens[i]->safe_object_iterate(cl);
893 892 }
894 893 }
895 894
896 895 Space* GenCollectedHeap::space_containing(const void* addr) const {
897 896 for (int i = 0; i < _n_gens; i++) {
898 897 Space* res = _gens[i]->space_containing(addr);
899 898 if (res != NULL) return res;
900 899 }
901 900 // Otherwise...
902 901 assert(false, "Could not find containing space");
903 902 return NULL;
904 903 }
905 904
906 905
907 906 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
908 907 assert(is_in_reserved(addr), "block_start of address outside of heap");
909 908 for (int i = 0; i < _n_gens; i++) {
910 909 if (_gens[i]->is_in_reserved(addr)) {
911 910 assert(_gens[i]->is_in(addr),
912 911 "addr should be in allocated part of generation");
913 912 return _gens[i]->block_start(addr);
914 913 }
915 914 }
916 915 assert(false, "Some generation should contain the address");
917 916 return NULL;
918 917 }
919 918
920 919 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
921 920 assert(is_in_reserved(addr), "block_size of address outside of heap");
922 921 for (int i = 0; i < _n_gens; i++) {
923 922 if (_gens[i]->is_in_reserved(addr)) {
924 923 assert(_gens[i]->is_in(addr),
925 924 "addr should be in allocated part of generation");
926 925 return _gens[i]->block_size(addr);
927 926 }
928 927 }
929 928 assert(false, "Some generation should contain the address");
930 929 return 0;
931 930 }
932 931
933 932 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
934 933 assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
935 934 assert(block_start(addr) == addr, "addr must be a block start");
936 935 for (int i = 0; i < _n_gens; i++) {
937 936 if (_gens[i]->is_in_reserved(addr)) {
938 937 return _gens[i]->block_is_obj(addr);
939 938 }
940 939 }
941 940 assert(false, "Some generation should contain the address");
942 941 return false;
943 942 }
944 943
945 944 bool GenCollectedHeap::supports_tlab_allocation() const {
946 945 for (int i = 0; i < _n_gens; i += 1) {
947 946 if (_gens[i]->supports_tlab_allocation()) {
948 947 return true;
949 948 }
950 949 }
951 950 return false;
952 951 }
953 952
954 953 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
955 954 size_t result = 0;
956 955 for (int i = 0; i < _n_gens; i += 1) {
957 956 if (_gens[i]->supports_tlab_allocation()) {
958 957 result += _gens[i]->tlab_capacity();
959 958 }
960 959 }
961 960 return result;
962 961 }
963 962
964 963 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
965 964 size_t result = 0;
966 965 for (int i = 0; i < _n_gens; i += 1) {
967 966 if (_gens[i]->supports_tlab_allocation()) {
968 967 result += _gens[i]->tlab_used();
969 968 }
970 969 }
971 970 return result;
972 971 }
973 972
974 973 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
975 974 size_t result = 0;
976 975 for (int i = 0; i < _n_gens; i += 1) {
977 976 if (_gens[i]->supports_tlab_allocation()) {
978 977 result += _gens[i]->unsafe_max_tlab_alloc();
979 978 }
980 979 }
981 980 return result;
982 981 }
983 982
984 983 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
985 984 bool gc_overhead_limit_was_exceeded;
986 985 return collector_policy()->mem_allocate_work(size /* size */,
987 986 true /* is_tlab */,
988 987 &gc_overhead_limit_was_exceeded);
989 988 }
990 989
991 990 // Requires "*prev_ptr" to be non-NULL. Removes and returns a block of
992 991 // minimal size from the list headed by "*prev_ptr".
993 992 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
994 993 bool first = true;
995 994 size_t min_size = 0; // "first" makes this conceptually infinite.
996 995 ScratchBlock **smallest_ptr, *smallest;
997 996 ScratchBlock *cur = *prev_ptr;
998 997 while (cur) {
999 998 assert(*prev_ptr == cur, "just checking");
1000 999 if (first || cur->num_words < min_size) {
1001 1000 smallest_ptr = prev_ptr;
1002 1001 smallest = cur;
1003 1002 min_size = smallest->num_words;
1004 1003 first = false;
1005 1004 }
1006 1005 prev_ptr = &cur->next;
1007 1006 cur = cur->next;
1008 1007 }
1009 1008 smallest = *smallest_ptr;
1010 1009 *smallest_ptr = smallest->next;
1011 1010 return smallest;
1012 1011 }
1013 1012
1014 1013 // Sort the scratch block list headed by "list" into decreasing size order,
1015 1014 // and set "list" to the result.
1016 1015 static void sort_scratch_list(ScratchBlock*& list) {
1017 1016 ScratchBlock* sorted = NULL;
1018 1017 ScratchBlock* unsorted = list;
1019 1018 while (unsorted) {
1020 1019 ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1021 1020 smallest->next = sorted;
1022 1021 sorted = smallest;
1023 1022 }
1024 1023 list = sorted;
1025 1024 }
1026 1025
1027 1026 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1028 1027 size_t max_alloc_words) {
1029 1028 ScratchBlock* res = NULL;
1030 1029 for (int i = 0; i < _n_gens; i++) {
1031 1030 _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
1032 1031 }
1033 1032 sort_scratch_list(res);
1034 1033 return res;
1035 1034 }
1036 1035
1037 1036 void GenCollectedHeap::release_scratch() {
1038 1037 for (int i = 0; i < _n_gens; i++) {
1039 1038 _gens[i]->reset_scratch();
1040 1039 }
1041 1040 }
1042 1041
1043 1042 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1044 1043 void do_generation(Generation* gen) {
1045 1044 gen->prepare_for_verify();
1046 1045 }
1047 1046 };
1048 1047
1049 1048 void GenCollectedHeap::prepare_for_verify() {
1050 1049 ensure_parsability(false); // no need to retire TLABs
1051 1050 GenPrepareForVerifyClosure blk;
1052 1051 generation_iterate(&blk, false);
1053 1052 }
1054 1053
1055 1054
1056 1055 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1057 1056 bool old_to_young) {
1058 1057 if (old_to_young) {
1059 1058 for (int i = _n_gens-1; i >= 0; i--) {
1060 1059 cl->do_generation(_gens[i]);
1061 1060 }
1062 1061 } else {
1063 1062 for (int i = 0; i < _n_gens; i++) {
1064 1063 cl->do_generation(_gens[i]);
1065 1064 }
1066 1065 }
1067 1066 }
1068 1067
1069 1068 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1070 1069 for (int i = 0; i < _n_gens; i++) {
1071 1070 _gens[i]->space_iterate(cl, true);
1072 1071 }
1073 1072 }
1074 1073
1075 1074 bool GenCollectedHeap::is_maximal_no_gc() const {
1076 1075 for (int i = 0; i < _n_gens; i++) {
1077 1076 if (!_gens[i]->is_maximal_no_gc()) {
1078 1077 return false;
1079 1078 }
1080 1079 }
1081 1080 return true;
1082 1081 }
1083 1082
1084 1083 void GenCollectedHeap::save_marks() {
1085 1084 for (int i = 0; i < _n_gens; i++) {
1086 1085 _gens[i]->save_marks();
1087 1086 }
1088 1087 }
1089 1088
1090 1089 GenCollectedHeap* GenCollectedHeap::heap() {
1091 1090 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1092 1091 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1093 1092 return _gch;
1094 1093 }
1095 1094
1096 1095
1097 1096 void GenCollectedHeap::prepare_for_compaction() {
1098 1097 guarantee(_n_gens == 2, "Wrong number of generations");
1099 1098 Generation* old_gen = _gens[1];
1100 1099 // Start by compacting into same gen.
1101 1100 CompactPoint cp(old_gen);
1102 1101 old_gen->prepare_for_compaction(&cp);
1103 1102 Generation* young_gen = _gens[0];
1104 1103 young_gen->prepare_for_compaction(&cp);
1105 1104 }
1106 1105
1107 1106 GCStats* GenCollectedHeap::gc_stats(int level) const {
1108 1107 return _gens[level]->gc_stats();
1109 1108 }
1110 1109
1111 1110 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1112 1111 for (int i = _n_gens-1; i >= 0; i--) {
1113 1112 Generation* g = _gens[i];
1114 1113 if (!silent) {
1115 1114 gclog_or_tty->print("%s", g->name());
1116 1115 gclog_or_tty->print(" ");
1117 1116 }
1118 1117 g->verify();
1119 1118 }
1120 1119 if (!silent) {
1121 1120 gclog_or_tty->print("remset ");
1122 1121 }
1123 1122 rem_set()->verify();
1124 1123 }
1125 1124
1126 1125 void GenCollectedHeap::print_on(outputStream* st) const {
1127 1126 for (int i = 0; i < _n_gens; i++) {
1128 1127 _gens[i]->print_on(st);
1129 1128 }
1130 1129 MetaspaceAux::print_on(st);
1131 1130 }
1132 1131
1133 1132 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1134 1133 if (workers() != NULL) {
1135 1134 workers()->threads_do(tc);
1136 1135 }
1137 1136 #if INCLUDE_ALL_GCS
1138 1137 if (UseConcMarkSweepGC) {
1139 1138 ConcurrentMarkSweepThread::threads_do(tc);
1140 1139 }
1141 1140 #endif // INCLUDE_ALL_GCS
1142 1141 }
1143 1142
1144 1143 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1145 1144 #if INCLUDE_ALL_GCS
1146 1145 if (UseParNewGC) {
1147 1146 workers()->print_worker_threads_on(st);
1148 1147 }
1149 1148 if (UseConcMarkSweepGC) {
1150 1149 ConcurrentMarkSweepThread::print_all_on(st);
1151 1150 }
1152 1151 #endif // INCLUDE_ALL_GCS
1153 1152 }
1154 1153
1155 1154 void GenCollectedHeap::print_on_error(outputStream* st) const {
1156 1155 this->CollectedHeap::print_on_error(st);
1157 1156
1158 1157 #if INCLUDE_ALL_GCS
1159 1158 if (UseConcMarkSweepGC) {
1160 1159 st->cr();
1161 1160 CMSCollector::print_on_error(st);
1162 1161 }
1163 1162 #endif // INCLUDE_ALL_GCS
1164 1163 }
1165 1164
1166 1165 void GenCollectedHeap::print_tracing_info() const {
1167 1166 if (TraceGen0Time) {
1168 1167 get_gen(0)->print_summary_info();
1169 1168 }
1170 1169 if (TraceGen1Time) {
1171 1170 get_gen(1)->print_summary_info();
1172 1171 }
1173 1172 }
1174 1173
1175 1174 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
1176 1175 if (PrintGCDetails && Verbose) {
1177 1176 gclog_or_tty->print(" " SIZE_FORMAT
1178 1177 "->" SIZE_FORMAT
1179 1178 "(" SIZE_FORMAT ")",
1180 1179 prev_used, used(), capacity());
1181 1180 } else {
1182 1181 gclog_or_tty->print(" " SIZE_FORMAT "K"
1183 1182 "->" SIZE_FORMAT "K"
1184 1183 "(" SIZE_FORMAT "K)",
1185 1184 prev_used / K, used() / K, capacity() / K);
1186 1185 }
1187 1186 }
1188 1187
1189 1188 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1190 1189 private:
1191 1190 bool _full;
1192 1191 public:
1193 1192 void do_generation(Generation* gen) {
1194 1193 gen->gc_prologue(_full);
1195 1194 }
1196 1195 GenGCPrologueClosure(bool full) : _full(full) {};
1197 1196 };
1198 1197
1199 1198 void GenCollectedHeap::gc_prologue(bool full) {
1200 1199 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1201 1200
1202 1201 always_do_update_barrier = false;
1203 1202 // Fill TLAB's and such
1204 1203 CollectedHeap::accumulate_statistics_all_tlabs();
1205 1204 ensure_parsability(true); // retire TLABs
1206 1205
1207 1206 // Walk generations
1208 1207 GenGCPrologueClosure blk(full);
1209 1208 generation_iterate(&blk, false); // not old-to-young.
1210 1209 };
1211 1210
1212 1211 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1213 1212 private:
1214 1213 bool _full;
1215 1214 public:
1216 1215 void do_generation(Generation* gen) {
1217 1216 gen->gc_epilogue(_full);
1218 1217 }
1219 1218 GenGCEpilogueClosure(bool full) : _full(full) {};
1220 1219 };
1221 1220
1222 1221 void GenCollectedHeap::gc_epilogue(bool full) {
1223 1222 #ifdef COMPILER2
1224 1223 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1225 1224 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1226 1225 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1227 1226 #endif /* COMPILER2 */
1228 1227
1229 1228 resize_all_tlabs();
1230 1229
1231 1230 GenGCEpilogueClosure blk(full);
1232 1231 generation_iterate(&blk, false); // not old-to-young.
1233 1232
1234 1233 if (!CleanChunkPoolAsync) {
1235 1234 Chunk::clean_chunk_pool();
1236 1235 }
1237 1236
1238 1237 MetaspaceCounters::update_performance_counters();
1239 1238 CompressedClassSpaceCounters::update_performance_counters();
1240 1239
1241 1240 always_do_update_barrier = UseConcMarkSweepGC;
1242 1241 };
1243 1242
1244 1243 #ifndef PRODUCT
1245 1244 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1246 1245 private:
1247 1246 public:
1248 1247 void do_generation(Generation* gen) {
1249 1248 gen->record_spaces_top();
1250 1249 }
1251 1250 };
1252 1251
1253 1252 void GenCollectedHeap::record_gen_tops_before_GC() {
1254 1253 if (ZapUnusedHeapArea) {
1255 1254 GenGCSaveTopsBeforeGCClosure blk;
1256 1255 generation_iterate(&blk, false); // not old-to-young.
1257 1256 }
1258 1257 }
1259 1258 #endif // not PRODUCT
1260 1259
1261 1260 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1262 1261 public:
1263 1262 void do_generation(Generation* gen) {
1264 1263 gen->ensure_parsability();
1265 1264 }
1266 1265 };
1267 1266
1268 1267 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1269 1268 CollectedHeap::ensure_parsability(retire_tlabs);
1270 1269 GenEnsureParsabilityClosure ep_cl;
1271 1270 generation_iterate(&ep_cl, false);
1272 1271 }
1273 1272
1274 1273 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1275 1274 oop obj,
1276 1275 size_t obj_size) {
1277 1276 guarantee(old_gen->level() == 1, "We only get here with an old generation");
1278 1277 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1279 1278 HeapWord* result = NULL;
1280 1279
1281 1280 result = old_gen->expand_and_allocate(obj_size, false);
1282 1281
1283 1282 if (result != NULL) {
1284 1283 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1285 1284 }
1286 1285 return oop(result);
1287 1286 }
1288 1287
1289 1288 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1290 1289 jlong _time; // in ms
1291 1290 jlong _now; // in ms
1292 1291
1293 1292 public:
1294 1293 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1295 1294
1296 1295 jlong time() { return _time; }
1297 1296
1298 1297 void do_generation(Generation* gen) {
1299 1298 _time = MIN2(_time, gen->time_of_last_gc(_now));
1300 1299 }
1301 1300 };
1302 1301
1303 1302 jlong GenCollectedHeap::millis_since_last_gc() {
1304 1303 // We need a monotonically non-decreasing time in ms but
1305 1304 // os::javaTimeMillis() does not guarantee monotonicity.
1306 1305 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1307 1306 GenTimeOfLastGCClosure tolgc_cl(now);
1308 1307 // iterate over generations getting the oldest
1309 1308 // time that a generation was collected
1310 1309 generation_iterate(&tolgc_cl, false);
1311 1310
1312 1311 // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1313 1312 // provided the underlying platform provides such a time source
1314 1313 // (and it is bug free). So we still have to guard against getting
1315 1314 // back a time later than 'now'.
1316 1315 jlong retVal = now - tolgc_cl.time();
1317 1316 if (retVal < 0) {
1318 1317 NOT_PRODUCT(warning("time warp: "INT64_FORMAT, (int64_t) retVal);)
1319 1318 return 0;
1320 1319 }
1321 1320 return retVal;
1322 1321 }
[... 925 lines elided ...]
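For reference, the symptom this fix addresses, shown with illustrative log lines assuming -XX:+PrintGCDetails -XX:+PrintGCDateStamps (the timestamps are made up):

    before: a 'Full GC' event could occasionally begin without its date stamp:
        [Full GC (System.gc()) ...]
    after:  GCTraceTime emits the stamp for every event it traces:
        2014-09-05T12:34:56.789+0200: [Full GC (System.gc()) ...]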