/* NOTE(review): this is a side-by-side listing (LEFT/old column) of HotSpot CMS code
 * (concurrentMarkSweepGeneration.cpp), not compilable source: the integers (691, 692, ...)
 * are the original file's line numbers fused into the text. Visible complete definitions:
 * promotion_failure_occurred() (dumps old-gen block layout at trace level on promotion
 * failure) and reset_after_compaction() — note that in THIS column reset_after_compaction()
 * still calls MetaspaceGC::set_should_concurrent_collect(false) (orig lines 711-712).
 * compute_new_size() is truncated at orig line 732, and a CMSCollector STW-reset fragment
 * (orig 1587-1597) begins mid-body; both continue outside this span. */
691 } 692 693 // At a promotion failure dump information on block layout in heap 694 // (cms old generation). 695 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() { 696 Log(gc, promotion) log; 697 if (log.is_trace()) { 698 ResourceMark rm; 699 cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream()); 700 } 701 } 702 703 void ConcurrentMarkSweepGeneration::reset_after_compaction() { 704 // Clear the promotion information. These pointers can be adjusted 705 // along with all the other pointers into the heap but 706 // compaction is expected to be a rare event with 707 // a heap using cms so don't do it without seeing the need. 708 for (uint i = 0; i < ParallelGCThreads; i++) { 709 _par_gc_thread_states[i]->promo.reset(); 710 } 711 // No longer a need to do a concurrent collection for Metaspace. 712 MetaspaceGC::set_should_concurrent_collect(false); 713 } 714 715 void ConcurrentMarkSweepGeneration::compute_new_size() { 716 assert_locked_or_safepoint(Heap_lock); 717 718 // If incremental collection failed, we just want to expand 719 // to the limit. 720 if (incremental_collection_failed()) { 721 clear_incremental_collection_failed(); 722 grow_to_reserved(); 723 return; 724 } 725 726 // The heap has been compacted but not reset yet. 727 // Any metric such as free() or used() will be incorrect. 728 729 CardGeneration::compute_new_size(); 730 731 // Reset again after a possible resizing 732 if (did_compact()) { 1587 (free_size > 0 && (num == 1 || num == 2)), 1588 "There should be at most 2 free chunks after compaction"); 1589 #endif // ASSERT 1590 _collectorState = Resetting; 1591 assert(_restart_addr == NULL, 1592 "Should have been NULL'd before baton was passed"); 1593 reset_stw(); 1594 _cmsGen->reset_after_compaction(); 1595 _concurrent_cycles_since_last_unload = 0; 1596 1597 // Clear any data recorded in the PLAB chunk arrays. 
/* NOTE(review): the literal '|' in this span separates the two columns of the listing:
 * LEFT/old column ends here (tail of the CMSCollector STW-reset fragment, orig 1598-1616,
 * plus the truncated head of CMSCollector::print_eden_and_survivor_chunk_arrays(),
 * orig 1618-1626); the RIGHT/new column then restarts at orig line 691. In the RIGHT
 * column's reset_after_compaction() (orig 703-711) the
 * MetaspaceGC::set_should_concurrent_collect(false) call present in the left column has
 * been removed — it reappears later in the right column's STW reset path (see orig
 * 1606-1607 in the continuation). The trailing compute_new_size() (orig 713-717) is
 * truncated and continues outside this span. */
1598 if (_survivor_plab_array != NULL) { 1599 reset_survivor_plab_arrays(); 1600 } 1601 1602 // Adjust the per-size allocation stats for the next epoch. 1603 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */); 1604 // Restart the "inter sweep timer" for the next epoch. 1605 _inter_sweep_timer.reset(); 1606 _inter_sweep_timer.start(); 1607 1608 gch->post_full_gc_dump(gc_timer); 1609 1610 gc_timer->register_gc_end(); 1611 1612 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); 1613 1614 // For a mark-sweep-compact, compute_new_size() will be called 1615 // in the heap's do_collection() method. 1616 } 1617 1618 void CMSCollector::print_eden_and_survivor_chunk_arrays() { 1619 Log(gc, heap) log; 1620 if (!log.is_trace()) { 1621 return; 1622 } 1623 1624 ContiguousSpace* eden_space = _young_gen->eden(); 1625 ContiguousSpace* from_space = _young_gen->from(); 1626 ContiguousSpace* to_space = _young_gen->to(); | 691 } 692 693 // At a promotion failure dump information on block layout in heap 694 // (cms old generation). 695 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() { 696 Log(gc, promotion) log; 697 if (log.is_trace()) { 698 ResourceMark rm; 699 cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream()); 700 } 701 } 702 703 void ConcurrentMarkSweepGeneration::reset_after_compaction() { 704 // Clear the promotion information. These pointers can be adjusted 705 // along with all the other pointers into the heap but 706 // compaction is expected to be a rare event with 707 // a heap using cms so don't do it without seeing the need. 708 for (uint i = 0; i < ParallelGCThreads; i++) { 709 _par_gc_thread_states[i]->promo.reset(); 710 } 711 } 712 713 void ConcurrentMarkSweepGeneration::compute_new_size() { 714 assert_locked_or_safepoint(Heap_lock); 715 716 // If incremental collection failed, we just want to expand 717 // to the limit. 
/* NOTE(review): continuation of the RIGHT/new column of the listing. It resumes inside
 * the truncated ConcurrentMarkSweepGeneration::compute_new_size() (orig 718-730), then
 * jumps to the CMSCollector STW-reset fragment (orig 1585-1617). The semantic delta
 * versus the left column that is fully visible here: the call
 * MetaspaceGC::set_should_concurrent_collect(false) now lives in this STW reset path
 * (orig 1606-1607, after the inter-sweep timer restart and before post_full_gc_dump)
 * instead of inside reset_after_compaction(). The trailing
 * print_eden_and_survivor_chunk_arrays() (orig 1619-1627) is truncated and continues
 * outside this span. */
718 if (incremental_collection_failed()) { 719 clear_incremental_collection_failed(); 720 grow_to_reserved(); 721 return; 722 } 723 724 // The heap has been compacted but not reset yet. 725 // Any metric such as free() or used() will be incorrect. 726 727 CardGeneration::compute_new_size(); 728 729 // Reset again after a possible resizing 730 if (did_compact()) { 1585 (free_size > 0 && (num == 1 || num == 2)), 1586 "There should be at most 2 free chunks after compaction"); 1587 #endif // ASSERT 1588 _collectorState = Resetting; 1589 assert(_restart_addr == NULL, 1590 "Should have been NULL'd before baton was passed"); 1591 reset_stw(); 1592 _cmsGen->reset_after_compaction(); 1593 _concurrent_cycles_since_last_unload = 0; 1594 1595 // Clear any data recorded in the PLAB chunk arrays. 1596 if (_survivor_plab_array != NULL) { 1597 reset_survivor_plab_arrays(); 1598 } 1599 1600 // Adjust the per-size allocation stats for the next epoch. 1601 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */); 1602 // Restart the "inter sweep timer" for the next epoch. 1603 _inter_sweep_timer.reset(); 1604 _inter_sweep_timer.start(); 1605 1606 // No longer a need to do a concurrent collection for Metaspace. 1607 MetaspaceGC::set_should_concurrent_collect(false); 1608 1609 gch->post_full_gc_dump(gc_timer); 1610 1611 gc_timer->register_gc_end(); 1612 1613 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); 1614 1615 // For a mark-sweep-compact, compute_new_size() will be called 1616 // in the heap's do_collection() method. 1617 } 1618 1619 void CMSCollector::print_eden_and_survivor_chunk_arrays() { 1620 Log(gc, heap) log; 1621 if (!log.is_trace()) { 1622 return; 1623 } 1624 1625 ContiguousSpace* eden_space = _young_gen->eden(); 1626 ContiguousSpace* from_space = _young_gen->from(); 1627 ContiguousSpace* to_space = _young_gen->to(); |