548 result == NULL ? "NULL" : "object");
549
550 return result;
551 }
552
// Fallback allocation path: the young generation is never expanded here,
// so this simply retries the ordinary allocation.
//   size     - request size in heap words
//   is_tlab  - whether the request is for a TLAB
//   parallel - accepted for interface compatibility; unused in this path
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
559
560 void DefNewGeneration::adjust_desired_tenuring_threshold() {
561 // Set the desired survivor size to half the real survivor space
562 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
563 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
564
565 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
566
567 if (UsePerfData) {
568 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
569 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
570 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
571 }
572
573 age_table()->print_age_table(_tenuring_threshold);
574 }
575
// Young-generation collection entry point. A non-full request must carry a
// concrete allocation size (see the assert below). Only the prologue and a
// later epilogue fragment are visible in this chunk.
void DefNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Begin GC timing and report the start event (with cause) to the tracer.
  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  // Cache a pointer to the old generation.
  _old_gen = gch->old_gen();
         (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
         !gch->incremental_collection_failed(),
         "Twice in a row");
    seen_incremental_collection_failed = false;
  }
#endif // ASSERT
}

  // With ZapUnusedHeapArea, verify that debug mangling of unused areas has
  // completed in all three young spaces.
  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // Clean the chunk pool inline unless asynchronous cleaning is enabled.
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->gen_policy()->counters()->update_counters();
}
956
957 void DefNewGeneration::record_spaces_top() {
958 assert(ZapUnusedHeapArea, "Not mangling unused space");
959 eden()->set_top_for_allocations();
960 to()->set_top_for_allocations();
961 from()->set_top_for_allocations();
962 }
963
// Reference-processor initialization: no young-generation-specific setup;
// simply delegates to the superclass implementation.
void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}
967
968
969 void DefNewGeneration::update_counters() {
970 if (UsePerfData) {
971 _eden_counters->update_all();
972 _from_counters->update_all();
973 _to_counters->update_all();
974 _gen_counters->update_all();
|
548 result == NULL ? "NULL" : "object");
549
550 return result;
551 }
552
// Allocation retry hook: expansion of the young generation is not attempted,
// so this forwards straight to the normal allocation path.
//   size     - request size in heap words
//   is_tlab  - whether the request is for a TLAB
//   parallel - unused; kept for interface compatibility
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
559
560 void DefNewGeneration::adjust_desired_tenuring_threshold() {
561 // Set the desired survivor size to half the real survivor space
562 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
563 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
564
565 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
566
567 if (UsePerfData) {
568 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
569 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
570 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
571 }
572
573 age_table()->print_age_table(_tenuring_threshold);
574 }
575
// Young-generation collection entry point. A non-full request must carry a
// concrete allocation size (see the assert below). Only the prologue and a
// later epilogue fragment are visible in this chunk.
void DefNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Begin GC timing and report the start event (with cause) to the tracer.
  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  // Cache a pointer to the old generation.
  _old_gen = gch->old_gen();
         (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
         !gch->incremental_collection_failed(),
         "Twice in a row");
    seen_incremental_collection_failed = false;
  }
#endif // ASSERT
}

  // With ZapUnusedHeapArea, verify that debug mangling of unused areas has
  // completed in all three young spaces.
  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // Clean the chunk pool inline unless asynchronous cleaning is enabled.
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}
956
957 void DefNewGeneration::record_spaces_top() {
958 assert(ZapUnusedHeapArea, "Not mangling unused space");
959 eden()->set_top_for_allocations();
960 to()->set_top_for_allocations();
961 from()->set_top_for_allocations();
962 }
963
// Reference-processor initialization: nothing young-generation-specific to
// set up here; defer entirely to the superclass.
void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}
967
968
969 void DefNewGeneration::update_counters() {
970 if (UsePerfData) {
971 _eden_counters->update_all();
972 _from_counters->update_all();
973 _to_counters->update_all();
974 _gen_counters->update_all();
|