547 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
548 "true" : "false",
549 Heap_lock->is_locked() ? "locked" : "unlocked",
550 from()->free(),
551 should_try_alloc ? "" : " should_allocate_from_space: NOT",
552 do_alloc ? " Heap_lock is not owned by self" : "",
553 result == NULL ? "NULL" : "object");
554
555 return result;
556 }
557
// Slow-path allocation hook; presumably invoked after an ordinary allocation
// of "size" words failed -- TODO confirm against callers.  Other generations
// may expand their committed space here; the young generation does not.
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  // Simply retry the allocation at the current size; "parallel" is unused
  // in this implementation.
  return allocate(size, is_tlab);
}
564
565 void DefNewGeneration::adjust_desired_tenuring_threshold() {
566 // Set the desired survivor size to half the real survivor space
567 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
568 _tenuring_threshold =
569 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
570 }
571
572 void DefNewGeneration::collect(bool full,
573 bool clear_all_soft_refs,
574 size_t size,
575 bool is_tlab) {
576 assert(full || size > 0, "otherwise we don't want to collect");
577
578 GenCollectedHeap* gch = GenCollectedHeap::heap();
579
580 _gc_timer->register_gc_start();
581 DefNewTracer gc_tracer;
582 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
583
584 _old_gen = gch->old_gen();
585
586 // If the next generation is too full to accommodate promotion
587 // from this generation, pass on collection; let the next generation
588 // do it.
589 if (!collection_attempt_is_safe()) {
|
547 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
548 "true" : "false",
549 Heap_lock->is_locked() ? "locked" : "unlocked",
550 from()->free(),
551 should_try_alloc ? "" : " should_allocate_from_space: NOT",
552 do_alloc ? " Heap_lock is not owned by self" : "",
553 result == NULL ? "NULL" : "object");
554
555 return result;
556 }
557
// Slow-path allocation hook; presumably invoked after an ordinary allocation
// of "size" words failed -- TODO confirm against callers.  Other generations
// may expand their committed space here; the young generation does not.
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  // Simply retry the allocation at the current size; "parallel" is unused
  // in this implementation.
  return allocate(size, is_tlab);
}
564
565 void DefNewGeneration::adjust_desired_tenuring_threshold() {
566 // Set the desired survivor size to half the real survivor space
567 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
568 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
569
570 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
571
572 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
573 if (UsePerfData) {
574 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
575 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
576 }
577
578 age_table()->print_age_table(_tenuring_threshold);
579 }
580
581 void DefNewGeneration::collect(bool full,
582 bool clear_all_soft_refs,
583 size_t size,
584 bool is_tlab) {
585 assert(full || size > 0, "otherwise we don't want to collect");
586
587 GenCollectedHeap* gch = GenCollectedHeap::heap();
588
589 _gc_timer->register_gc_start();
590 DefNewTracer gc_tracer;
591 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
592
593 _old_gen = gch->old_gen();
594
595 // If the next generation is too full to accommodate promotion
596 // from this generation, pass on collection; let the next generation
597 // do it.
598 if (!collection_attempt_is_safe()) {
|