        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
          "true" : "false",
        Heap_lock->is_locked() ? "locked" : "unlocked",
        from()->free(),
        should_try_alloc ? "" : " should_allocate_from_space: NOT",
        do_alloc ? "" : " Heap_lock is not owned by self",
        result == NULL ? "NULL" : "object");

  return result;
}

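// Note: unlike the old generation, DefNew does not grow itself on an
// allocation failure; expand_and_allocate() below therefore ignores its
// "parallel" argument and simply retries the normal allocation path.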
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

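// Recompute the age at which surviving objects get promoted to the old
// generation. compute_tenuring_threshold() picks the smallest age whose
// cumulative survivor volume exceeds the desired survivor occupancy
// (governed by TargetSurvivorRatio), which is why the survivor capacity
// is passed in, expressed in words.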
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
}

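// Perform a scavenge of this generation. "full" distinguishes a full
// collection request from an ordinary minor GC; "size" and "is_tlab"
// describe the allocation whose failure triggered the collection, hence
// the assert below that at least one of the two conditions holds.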
void DefNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.

// ... [roughly 340 lines elided here: the remainder of collect() and the
// functions that follow it; the excerpt resumes in the middle of an
// assert inside DefNewGeneration::gc_epilogue] ...

             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

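// With -XX:+ZapUnusedHeapArea, unused parts of each space are mangled with
// a known bit pattern; these checks verify that the mangling of the unused
// areas is complete.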
  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

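// When asynchronous cleaning is disabled, release cached Chunks (the
// backing storage for Arena allocations) synchronously at the end of GC.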
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->gen_policy()->counters()->update_counters();
}

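// Record the current top of each space so that later mangling checks know
// where the allocated region ends; this is only meaningful when unused
// space is being mangled, hence the assert on ZapUnusedHeapArea.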
void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

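// Nothing generation-specific to set up: defer to Generation, which
// creates the ReferenceProcessor covering this generation's span.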
void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}

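// Push current sizes and occupancy into the jvmstat performance counters;
// these are only maintained when UsePerfData is enabled.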
void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}