16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc/parallel/cardTableExtension.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
32 #include "gc/parallel/psMarkSweep.hpp"
33 #include "gc/parallel/psParallelCompact.inline.hpp"
34 #include "gc/parallel/psScavenge.inline.hpp"
35 #include "gc/parallel/psTasks.hpp"
36 #include "gc/shared/collectorPolicy.hpp"
37 #include "gc/shared/gcCause.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcId.hpp"
40 #include "gc/shared/gcLocker.inline.hpp"
41 #include "gc/shared/gcTimer.hpp"
42 #include "gc/shared/gcTrace.hpp"
43 #include "gc/shared/gcTraceTime.inline.hpp"
44 #include "gc/shared/isGCActiveMark.hpp"
45 #include "gc/shared/referencePolicy.hpp"
46 #include "gc/shared/referenceProcessor.hpp"
47 #include "gc/shared/spaceDecorator.hpp"
48 #include "gc/shared/weakProcessor.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "logging/log.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "runtime/biasedLocking.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/threadCritical.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "runtime/vm_operations.hpp"
57 #include "services/memoryService.hpp"
58 #include "utilities/stack.inline.hpp"
59
// Definitions of PSScavenge's static state. The NULL/0/false values here are
// only the pre-initialization defaults; the real values are installed during
// heap setup and at the start of each scavenge (not visible in this chunk).
// NOTE(review): per-field purposes below are inferred from the names and the
// visible uses in this file — confirm against psScavenge.hpp.
// Snapshot of to-space's top taken before a scavenge — presumably used
// afterwards to tell how much was copied; TODO confirm.
60 HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
// Count of back-to-back scavenges that were skipped (e.g. by policy).
61 int PSScavenge::_consecutive_skipped_scavenges = 0;
// Reference processor driving soft/weak/final/phantom discovery for scavenges.
62 ReferenceProcessor* PSScavenge::_ref_processor = NULL;
// Card table used to find old-to-young pointers during a scavenge.
63 CardTableExtension* PSScavenge::_card_table = NULL;
// Set when survivor space overflowed and objects had to be promoted early.
64 bool PSScavenge::_survivor_overflow = false;
// Age at which surviving objects are promoted to the old generation.
65 uint PSScavenge::_tenuring_threshold = 0;
// Boundary address between the young and old generations — presumably used
// for fast is-in-young-gen tests; TODO confirm.
66 HeapWord* PSScavenge::_young_generation_boundary = NULL;
211 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
212 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
213
214 ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
215 PSAdaptiveSizePolicy* policy = heap->size_policy();
216 IsGCActiveMark mark;
217
218 const bool scavenge_done = PSScavenge::invoke_no_policy();
219 const bool need_full_gc = !scavenge_done ||
220 policy->should_full_GC(heap->old_gen()->free_in_bytes());
221 bool full_gc_done = false;
222
223 if (UsePerfData) {
224 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
225 const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
226 counters->update_full_follows_scavenge(ffs_val);
227 }
228
229 if (need_full_gc) {
230 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
231 CollectorPolicy* cp = heap->collector_policy();
232 const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
233
234 if (UseParallelOldGC) {
235 full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
236 } else {
237 full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
238 }
239 }
240
241 return full_gc_done;
242 }
243
244 // This method contains no policy. You should probably
245 // be calling invoke() instead.
246 bool PSScavenge::invoke_no_policy() {
247 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
248 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
249
250 _gc_timer.register_gc_start();
251
552 size_t max_old_gen_size = old_gen->max_gen_size();
553 size_t max_eden_size = max_young_size -
554 young_gen->from_space()->capacity_in_bytes() -
555 young_gen->to_space()->capacity_in_bytes();
556
557 // Used for diagnostics
558 size_policy->clear_generation_free_space_flags();
559
560 size_policy->compute_eden_space_size(young_live,
561 eden_live,
562 cur_eden,
563 max_eden_size,
564 false /* not full gc*/);
565
566 size_policy->check_gc_overhead_limit(young_live,
567 eden_live,
568 max_old_gen_size,
569 max_eden_size,
570 false /* not full gc*/,
571 gc_cause,
572 heap->collector_policy());
573
574 size_policy->decay_supplemental_growth(false /* not full gc*/);
575 }
576 // Resize the young generation at every collection
577 // even if new sizes have not been calculated. This is
578 // to allow resizes that may have been inhibited by the
579 // relative location of the "to" and "from" spaces.
580
581 // Resizing the old gen at young collections can cause increases
582 // that don't feed back to the generation sizing policy until
583 // a full collection. Don't resize the old gen here.
584
585 heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
586 size_policy->calculated_survivor_size_in_bytes());
587
588 log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
589 }
590
591 // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
592 // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc/parallel/cardTableExtension.hpp"
29 #include "gc/parallel/gcTaskManager.hpp"
30 #include "gc/parallel/parallelScavengeHeap.hpp"
31 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
32 #include "gc/parallel/psMarkSweep.hpp"
33 #include "gc/parallel/psParallelCompact.inline.hpp"
34 #include "gc/parallel/psScavenge.inline.hpp"
35 #include "gc/parallel/psTasks.hpp"
36 #include "gc/shared/gcCause.hpp"
37 #include "gc/shared/gcHeapSummary.hpp"
38 #include "gc/shared/gcId.hpp"
39 #include "gc/shared/gcLocker.inline.hpp"
40 #include "gc/shared/gcTimer.hpp"
41 #include "gc/shared/gcTrace.hpp"
42 #include "gc/shared/gcTraceTime.inline.hpp"
43 #include "gc/shared/isGCActiveMark.hpp"
44 #include "gc/shared/referencePolicy.hpp"
45 #include "gc/shared/referenceProcessor.hpp"
46 #include "gc/shared/softRefPolicy.hpp"
47 #include "gc/shared/spaceDecorator.hpp"
48 #include "gc/shared/weakProcessor.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "logging/log.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "runtime/biasedLocking.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/threadCritical.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "runtime/vm_operations.hpp"
57 #include "services/memoryService.hpp"
58 #include "utilities/stack.inline.hpp"
59
// Definitions of PSScavenge's static state. The NULL/0/false values here are
// only the pre-initialization defaults; the real values are installed during
// heap setup and at the start of each scavenge (not visible in this chunk).
// NOTE(review): per-field purposes below are inferred from the names and the
// visible uses in this file — confirm against psScavenge.hpp.
// Snapshot of to-space's top taken before a scavenge — presumably used
// afterwards to tell how much was copied; TODO confirm.
60 HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
// Count of back-to-back scavenges that were skipped (e.g. by policy).
61 int PSScavenge::_consecutive_skipped_scavenges = 0;
// Reference processor driving soft/weak/final/phantom discovery for scavenges.
62 ReferenceProcessor* PSScavenge::_ref_processor = NULL;
// Card table used to find old-to-young pointers during a scavenge.
63 CardTableExtension* PSScavenge::_card_table = NULL;
// Set when survivor space overflowed and objects had to be promoted early.
64 bool PSScavenge::_survivor_overflow = false;
// Age at which surviving objects are promoted to the old generation.
65 uint PSScavenge::_tenuring_threshold = 0;
// Boundary address between the young and old generations — presumably used
// for fast is-in-young-gen tests; TODO confirm.
66 HeapWord* PSScavenge::_young_generation_boundary = NULL;
211 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
212 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
213
214 ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
215 PSAdaptiveSizePolicy* policy = heap->size_policy();
216 IsGCActiveMark mark;
217
218 const bool scavenge_done = PSScavenge::invoke_no_policy();
219 const bool need_full_gc = !scavenge_done ||
220 policy->should_full_GC(heap->old_gen()->free_in_bytes());
221 bool full_gc_done = false;
222
223 if (UsePerfData) {
224 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
225 const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
226 counters->update_full_follows_scavenge(ffs_val);
227 }
228
229 if (need_full_gc) {
230 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
231 SoftRefPolicy* cp = heap->soft_ref_policy();
232 const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
233
234 if (UseParallelOldGC) {
235 full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
236 } else {
237 full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
238 }
239 }
240
241 return full_gc_done;
242 }
243
244 // This method contains no policy. You should probably
245 // be calling invoke() instead.
246 bool PSScavenge::invoke_no_policy() {
247 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
248 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
249
250 _gc_timer.register_gc_start();
251
552 size_t max_old_gen_size = old_gen->max_gen_size();
553 size_t max_eden_size = max_young_size -
554 young_gen->from_space()->capacity_in_bytes() -
555 young_gen->to_space()->capacity_in_bytes();
556
557 // Used for diagnostics
558 size_policy->clear_generation_free_space_flags();
559
560 size_policy->compute_eden_space_size(young_live,
561 eden_live,
562 cur_eden,
563 max_eden_size,
564 false /* not full gc*/);
565
566 size_policy->check_gc_overhead_limit(young_live,
567 eden_live,
568 max_old_gen_size,
569 max_eden_size,
570 false /* not full gc*/,
571 gc_cause,
572 heap->soft_ref_policy());
573
574 size_policy->decay_supplemental_growth(false /* not full gc*/);
575 }
576 // Resize the young generation at every collection
577 // even if new sizes have not been calculated. This is
578 // to allow resizes that may have been inhibited by the
579 // relative location of the "to" and "from" spaces.
580
581 // Resizing the old gen at young collections can cause increases
582 // that don't feed back to the generation sizing policy until
583 // a full collection. Don't resize the old gen here.
584
585 heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
586 size_policy->calculated_survivor_size_in_bytes());
587
588 log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
589 }
590
591 // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
592 // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
|