303 }
304
305 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
306 bool* gc_overhead_limit_was_exceeded) {
307 return collector_policy()->mem_allocate_work(size,
308 false /* is_tlab */,
309 gc_overhead_limit_was_exceeded);
310 }
311
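// Soft references are cleared unconditionally only for a last-ditch
// collection, typically a final attempt made after a preceding
// collection has already failed to satisfy an allocation.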
312 bool GenCollectedHeap::must_clear_all_soft_refs() {
313 return _gc_cause == GCCause::_last_ditch_collection;
314 }
315
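// With CMS, some stop-the-world full collections can instead be handed
// off to the concurrent collector: e.g. running with
// -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent makes a
// System.gc() start a concurrent cycle rather than a full pause.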
316 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
317 return UseConcMarkSweepGC &&
318 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
319 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
320 }
321
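// Collect a single generation: set up timing, counter and memory-manager
// tracing, arrange reference discovery, delegate the actual work to
// gen->collect(), and run the optional before/after verification passes.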
322 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
323 bool is_tlab, bool run_verification, bool clear_soft_refs) {
324   // Timer for individual generations. The third argument is false: no CR
325 // FIXME: We should try to start the timing earlier to cover more of the GC pause
326 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
327 // so we can assume here that the next GC id is what we want.
328 GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
329 TraceCollectorStats tcs(gen->counters());
330   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
331
332 size_t prev_used = gen->used();
333 gen->stat_record()->invocations++;
334 gen->stat_record()->accumulated_time.start();
335
336 // Must be done anew before each collection because
337 // a previous collection will do mangling and will
338   // change the top of some spaces.
339 record_gen_tops_before_GC();
340
341 if (PrintGC && Verbose) {
342 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
343 gen->level(),
344 gen->stat_record()->invocations,
345 size * HeapWordSize);
346 }
347
348 if (run_verification && VerifyBeforeGC) {
349 HandleMark hm; // Discard invalid handles created during verification
350 Universe::verify(" VerifyBeforeGC:");
351 }
352 COMPILER2_PRESENT(DerivedPointerTable::clear());
353
354 // Do collection work
355 {
356 // Note on ref discovery: For what appear to be historical reasons,
357     // GCH enables and disables (by enqueueing) refs discovery.
358 // In the future this should be moved into the generation's
359 // collect method so that ref discovery and enqueueing concerns
360 // are local to a generation. The collect method could return
361 // an appropriate indication in the case that notification on
362 // the ref lock was needed. This will make the treatment of
363 // weak refs more uniform (and indeed remove such concerns
364 // from GCH). XXX
365
366 HandleMark hm; // Discard invalid handles created during gc
367 save_marks(); // save marks for all gens
368 // We want to discover references, but not process them yet.
369 // This mode is disabled in process_discovered_references if the
370 // generation does some collection work, or in
371 // enqueue_discovered_references if the generation returns
372 // without doing any work.
373 ReferenceProcessor* rp = gen->ref_processor();
374 // If the discovery of ("weak") refs in this generation is
375 // atomic wrt other collectors in this configuration, we
376 // are guaranteed to have empty discovered ref lists.
377 if (rp->discovery_is_atomic()) {
378 rp->enable_discovery();
379 rp->setup_policy(clear_soft_refs);
380 } else {
381 // collect() below will enable discovery as appropriate
382 }
383 gen->collect(full, clear_soft_refs, size, is_tlab);
384 if (!rp->enqueuing_is_done()) {
385 rp->enqueue_discovered_references();
386 } else {
387 rp->set_enqueuing_is_done(false);
388 }
389 rp->verify_no_references_recorded();
390 }
391
392 // Determine if allocation request was met.
393 if (size > 0) {
394 if (!is_tlab || gen->supports_tlab_allocation()) {
395 if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
396 size = 0;
397 }
398 }
399 }
400
401 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
402
403 gen->stat_record()->accumulated_time.stop();
404
405 update_gc_stats(gen->level(), full);
406
407 if (run_verification && VerifyAfterGC) {
408 HandleMark hm; // Discard invalid handles created during verification
409 Universe::verify(" VerifyAfterGC:");
410 }
411
412 if (PrintGCDetails) {
413 gclog_or_tty->print(":");
414 gen->print_heap_change(prev_used);
415 }
416 }
417
418 void GenCollectedHeap::do_collection(bool full,
419 bool clear_all_soft_refs,
420 size_t size,
445
446 print_heap_before_gc();
447
448 {
449 FlagSetting fl(_is_gc_active, true);
450
451 bool complete = full && (max_level == (n_gens()-1));
452 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
453 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
454 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
455 // so we can assume here that the next GC id is what we want.
456 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
457
458 gc_prologue(complete);
459 increment_total_collections(complete);
460
461 size_t gch_prev_used = used();
462 bool must_restore_marks_for_biased_locking = false;
463 bool run_verification = total_collections() >= VerifyGCStartAt;
464
465 if (_young_gen->performs_in_place_marking() ||
466 _old_gen->performs_in_place_marking()) {
467 // We want to avoid doing this for
468 // scavenge-only collections where it's unnecessary.
469 must_restore_marks_for_biased_locking = true;
470 BiasedLocking::preserve_marks();
471 }
472
473 bool prepared_for_verification = false;
474 int max_level_collected = 0;
475 if (!(full && _old_gen->full_collects_younger_generations()) &&
476 _young_gen->should_collect(full, size, is_tlab)) {
477 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
478 prepare_for_verify();
479 prepared_for_verification = true;
480 }
481 collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
482 }
483 if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
484 if (!complete) {
485 // The full_collections increment was missed above.
486 increment_total_full_collections();
487 }
488 pre_full_gc_dump(NULL); // do any pre full gc dumps
489 if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
490 if (!prepared_for_verification) {
491 prepare_for_verify();
492 }
493 }
494 collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
495 max_level_collected = 1;
496 }
497
498 // Update "complete" boolean wrt what actually transpired --
499 // for instance, a promotion failure could have led to
500 // a whole heap collection.
501 complete = complete || (max_level_collected == n_gens() - 1);
502
503 if (complete) { // We did a "major" collection
504 // FIXME: See comment at pre_full_gc_dump call
505 post_full_gc_dump(NULL); // do any post full gc dumps
506 }
507
508 if (PrintGCDetails) {
509 print_heap_change(gch_prev_used);
510
511 // Print metaspace info for full GC with PrintGCDetails flag.
512 if (complete) {
513 MetaspaceAux::print_metaspace_change(metadata_prev_used);
514 }
632 }
633
634 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
635 void GenCollectedHeap:: \
636 oop_since_save_marks_iterate(int level, \
637 OopClosureType* cur, \
638 OopClosureType* older) { \
639 if (level == 0) { \
640 _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
641 _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
642 } else { \
643 _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
644 } \
645 }
646
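// Instantiate the iterator above for each since-save-marks closure type.
// The nv_suffix picks the "non-virtual" specialization of the closure,
// letting hot iteration loops avoid virtual dispatch.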
647 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
648
649 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
650
651 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
652 if (level == 0) {
653 if (!_young_gen->no_allocs_since_save_marks()) return false;
654 }
655 if (!_old_gen->no_allocs_since_save_marks()) return false;
656 return true;
657 }
658
659 bool GenCollectedHeap::supports_inline_contig_alloc() const {
660 return _young_gen->supports_inline_contig_alloc();
661 }
662
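// top_addr()/end_addr() expose the young generation's allocation window
// so that compiled code can do inline contiguous allocation by bumping
// *top_addr() up to *end_addr().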
663 HeapWord** GenCollectedHeap::top_addr() const {
664 return _young_gen->top_addr();
665 }
666
667 HeapWord** GenCollectedHeap::end_addr() const {
668 return _young_gen->end_addr();
669 }
670
671 // public collection interfaces
672
673 void GenCollectedHeap::collect(GCCause::Cause cause) {
674 if (should_do_concurrent_full_gc(cause)) {
675 #if INCLUDE_ALL_GCS
676 // mostly concurrent full collection
|
303 }
304
305 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
306 bool* gc_overhead_limit_was_exceeded) {
307 return collector_policy()->mem_allocate_work(size,
308 false /* is_tlab */,
309 gc_overhead_limit_was_exceeded);
310 }
311
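// Soft references are cleared unconditionally only for a last-ditch
// collection, typically a final attempt made after a preceding
// collection has already failed to satisfy an allocation.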
312 bool GenCollectedHeap::must_clear_all_soft_refs() {
313 return _gc_cause == GCCause::_last_ditch_collection;
314 }
315
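// With CMS, some stop-the-world full collections can instead be handed
// off to the concurrent collector: e.g. running with
// -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent makes a
// System.gc() start a concurrent cycle rather than a full pause.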
316 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
317 return UseConcMarkSweepGC &&
318 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
319 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
320 }
321
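// Collect a single generation: set up timing, counter and memory-manager
// tracing, arrange reference discovery and (when requested) BiasedLocking
// mark preservation, delegate the actual work to gen->collect(), and run
// the optional before/after verification passes.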
322 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
323 bool is_tlab, bool run_verification, bool clear_soft_refs,
324 bool restore_marks_for_biased_locking) {
325   // Timer for individual generations. The third argument is false: no CR
326 // FIXME: We should try to start the timing earlier to cover more of the GC pause
327 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
328 // so we can assume here that the next GC id is what we want.
329 GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
330 TraceCollectorStats tcs(gen->counters());
331   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
332
333 size_t prev_used = gen->used();
334 gen->stat_record()->invocations++;
335 gen->stat_record()->accumulated_time.start();
336
337 // Must be done anew before each collection because
338 // a previous collection will do mangling and will
339   // change the top of some spaces.
340 record_gen_tops_before_GC();
341
342 if (PrintGC && Verbose) {
343 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
344 gen->level(),
345 gen->stat_record()->invocations,
346 size * HeapWordSize);
347 }
348
349 if (run_verification && VerifyBeforeGC) {
350 HandleMark hm; // Discard invalid handles created during verification
351 Universe::verify(" VerifyBeforeGC:");
352 }
353 COMPILER2_PRESENT(DerivedPointerTable::clear());
354
355 if (restore_marks_for_biased_locking) {
356 // We perform this mark word preservation work lazily
357 // because it's only at this point that we know whether we
358 // absolutely have to do it; we want to avoid doing it for
359 // scavenge-only collections where it's unnecessary
360 BiasedLocking::preserve_marks();
361 }
362
363 // Do collection work
364 {
365 // Note on ref discovery: For what appear to be historical reasons,
366     // GCH enables and disables (by enqueueing) refs discovery.
367 // In the future this should be moved into the generation's
368 // collect method so that ref discovery and enqueueing concerns
369 // are local to a generation. The collect method could return
370 // an appropriate indication in the case that notification on
371 // the ref lock was needed. This will make the treatment of
372 // weak refs more uniform (and indeed remove such concerns
373 // from GCH). XXX
374
375 HandleMark hm; // Discard invalid handles created during gc
376 save_marks(); // save marks for all gens
377 // We want to discover references, but not process them yet.
378 // This mode is disabled in process_discovered_references if the
379 // generation does some collection work, or in
380 // enqueue_discovered_references if the generation returns
381 // without doing any work.
382 ReferenceProcessor* rp = gen->ref_processor();
383 // If the discovery of ("weak") refs in this generation is
384 // atomic wrt other collectors in this configuration, we
385 // are guaranteed to have empty discovered ref lists.
386 if (rp->discovery_is_atomic()) {
387 rp->enable_discovery();
388 rp->setup_policy(clear_soft_refs);
389 } else {
390 // collect() below will enable discovery as appropriate
391 }
392 gen->collect(full, clear_soft_refs, size, is_tlab);
393 if (!rp->enqueuing_is_done()) {
394 rp->enqueue_discovered_references();
395 } else {
396 rp->set_enqueuing_is_done(false);
397 }
398 rp->verify_no_references_recorded();
399 }
400
401 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
402
403 gen->stat_record()->accumulated_time.stop();
404
405 update_gc_stats(gen->level(), full);
406
407 if (run_verification && VerifyAfterGC) {
408 HandleMark hm; // Discard invalid handles created during verification
409 Universe::verify(" VerifyAfterGC:");
410 }
411
412 if (PrintGCDetails) {
413 gclog_or_tty->print(":");
414 gen->print_heap_change(prev_used);
415 }
416 }
417
418 void GenCollectedHeap::do_collection(bool full,
419 bool clear_all_soft_refs,
420 size_t size,
445
446 print_heap_before_gc();
447
448 {
449 FlagSetting fl(_is_gc_active, true);
450
451 bool complete = full && (max_level == (n_gens()-1));
452 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
453 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
454 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
455 // so we can assume here that the next GC id is what we want.
456 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
457
458 gc_prologue(complete);
459 increment_total_collections(complete);
460
461 size_t gch_prev_used = used();
462 bool must_restore_marks_for_biased_locking = false;
463 bool run_verification = total_collections() >= VerifyGCStartAt;
464
465 bool prepared_for_verification = false;
466 int max_level_collected = 0;
467 if (!(max_level == 1 && full && _old_gen->full_collects_younger_generations()) &&
468 _young_gen->should_collect(full, size, is_tlab)) {
469 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
470 prepare_for_verify();
471 prepared_for_verification = true;
472 }
473 if (_young_gen->performs_in_place_marking()) {
474 must_restore_marks_for_biased_locking = true;
475 }
476 collect_generation(_young_gen,
477 full,
478 size,
479 is_tlab,
480 run_verification && VerifyGCLevel <= 0,
481 do_clear_all_soft_refs,
482 must_restore_marks_for_biased_locking);
483
484 if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
485 size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
486 // Allocation request was met by young GC.
487 size = 0;
488 }
489 }
490
491 if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
492 if (!complete) {
493 // The full_collections increment was missed above.
494 increment_total_full_collections();
495 }
496 pre_full_gc_dump(NULL); // do any pre full gc dumps
497 if (!prepared_for_verification && run_verification &&
498 VerifyGCLevel <= 1 && VerifyBeforeGC) {
499 prepare_for_verify();
500 }
501 assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
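      // BiasedLocking marks need preserving here only if the young
      // collection above did not already preserve them, hence the
      // negated argument below.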
502 collect_generation(_old_gen,
503 full,
504 size,
505 is_tlab,
506 run_verification && VerifyGCLevel <= 1,
507 do_clear_all_soft_refs,
508 !must_restore_marks_for_biased_locking);
509
510 must_restore_marks_for_biased_locking = true;
511 max_level_collected = 1;
512 }
513
514 // Update "complete" boolean wrt what actually transpired --
515 // for instance, a promotion failure could have led to
516 // a whole heap collection.
517 complete = complete || (max_level_collected == n_gens() - 1);
518
519 if (complete) { // We did a "major" collection
520 // FIXME: See comment at pre_full_gc_dump call
521 post_full_gc_dump(NULL); // do any post full gc dumps
522 }
523
524 if (PrintGCDetails) {
525 print_heap_change(gch_prev_used);
526
527 // Print metaspace info for full GC with PrintGCDetails flag.
528 if (complete) {
529 MetaspaceAux::print_metaspace_change(metadata_prev_used);
530 }
648 }
649
650 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
651 void GenCollectedHeap:: \
652 oop_since_save_marks_iterate(int level, \
653 OopClosureType* cur, \
654 OopClosureType* older) { \
655 if (level == 0) { \
656 _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
657 _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
658 } else { \
659 _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
660 } \
661 }
662
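// Instantiate the iterator above for each since-save-marks closure type.
// The nv_suffix picks the "non-virtual" specialization of the closure,
// letting hot iteration loops avoid virtual dispatch.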
663 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
664
665 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
666
667 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
668 if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
669 return false;
670 }
671 return _old_gen->no_allocs_since_save_marks();
672 }
673
674 bool GenCollectedHeap::supports_inline_contig_alloc() const {
675 return _young_gen->supports_inline_contig_alloc();
676 }
677
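// top_addr()/end_addr() expose the young generation's allocation window
// so that compiled code can do inline contiguous allocation by bumping
// *top_addr() up to *end_addr().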
678 HeapWord** GenCollectedHeap::top_addr() const {
679 return _young_gen->top_addr();
680 }
681
682 HeapWord** GenCollectedHeap::end_addr() const {
683 return _young_gen->end_addr();
684 }
685
686 // public collection interfaces
687
688 void GenCollectedHeap::collect(GCCause::Cause cause) {
689 if (should_do_concurrent_full_gc(cause)) {
690 #if INCLUDE_ALL_GCS
691 // mostly concurrent full collection
|