      return old_attempt_allocation(word_size);
    }
  } else {
    assert(purpose == GCAllocForTenured, "sanity");
    HeapWord* result = old_attempt_allocation(word_size);
    if (result != NULL) {
      return result;
    } else {
      // Let's try to allocate in the survivors in case we can fit the
      // object there.
      return survivor_attempt_allocation(word_size);
    }
  }

  ShouldNotReachHere();
  // Trying to keep some compilers happy.
  return NULL;
}

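// A G1 promotion-local allocation buffer (PLAB): each GC worker copies
// surviving objects into a private buffer to avoid contended allocation
// in the destination regions; _retired records whether the buffer has
// already been flushed back to the heap.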
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  ParGCAllocBuffer(gclab_word_size), _retired(false) { }

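// Per-worker state for an evacuation pause. Each GC worker owns its own
// task queue, dirty card queue and PLABs, so the hot copy path runs
// without synchronization.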
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  : _g1h(g1h),
    _refs(g1h->task_queue(queue_num)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _g1_rem(g1h->g1_rem_set()),
    _hash_seed(17), _queue_num(queue_num),
    _term_attempts(0),
    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
    _age_table(false), _scanner(g1h, this, rp),
    _strong_roots_time(0), _term_time(0),
    _alloc_buffer_waste(0), _undo_waste(0) {
  // We allocate young_cset_region_length() + 1 entries, since we
  // "sacrifice" entry 0 to keep track of surviving bytes for
  // non-young regions (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  // ...
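
// Each GC worker runs this closure after evacuating its share of the
// roots: it alternates between draining its own task queue and stealing
// from other workers' queues until the termination protocol agrees that
// every queue is empty.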
void G1ParEvacuateFollowersClosure::do_void() {
  StarTask stolen_task;
  G1ParScanThreadState* const pss = par_scan_state();
  pss->trim_queue();

  do {
    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
      assert(pss->verify_task(stolen_task), "sanity");
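      // A StarTask is a tagged pointer: the tag records whether the queue
      // slot holds a compressed (narrowOop*) or full-width (oop*) reference.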
      if (stolen_task.is_narrow()) {
        pss->deal_with_reference((narrowOop*) stolen_task);
      } else {
        pss->deal_with_reference((oop*) stolen_task);
      }

      // We've just processed a reference and we might have made
      // available new entries on the queues. So we have to make sure
      // we drain the queues as necessary.
      pss->trim_queue();
    }
  } while (!offer_termination());
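  // offer_termination() returns false if new work appeared on some queue
  // while we were waiting (so we loop and try to steal again), and true
  // once all workers agree that every queue is empty.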
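  // Retire this worker's PLABs: the unused tail of each buffer is filled
  // with a dummy object so the heap stays parseable, and the wasted space
  // is recorded.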
  pss->retire_alloc_buffers();
}

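// Scans the oops embedded in klasses. A klass is dirtied when one of
// those oops (e.g. its java mirror or a static field holder) is updated,
// so an undirtied klass cannot point into the young gen and may be skipped.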
class G1KlassScanClosure : public KlassClosure {
  G1ParCopyHelper* _closure;
  bool             _process_only_dirty;
  int              _count;
 public:
  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
      : _closure(closure), _process_only_dirty(process_only_dirty), _count(0) {}
  void do_klass(Klass* klass) {
    // If the klass has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
    if (!_process_only_dirty || klass->has_modified_oops()) {
      // Clean the klass since we're going to scavenge all the metadata.
      klass->clear_modified_oops();

      // Tell the closure that this klass is the Klass to scavenge
      // and is the one to dirty if oops are left pointing into the young gen.
      _closure->set_scanned_klass(klass);

// ...

    // Serial reference processing...
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              NULL,
                                              _gc_timer_stw);
  } else {
    // Parallel reference processing
    assert(rp->num_q() == no_of_gc_workers, "sanity");
    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

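    // The executor runs the reference processor's proxy tasks on the
    // STW work gang, one task per worker, each draining its share of
    // the discovered lists.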
    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              &par_task_executor,
                                              _gc_timer_stw);
  }

  _gc_tracer_stw->report_gc_reference_stats(stats);
  // We have completed copying any necessary live referent objects
  // (that were not copied during the actual pause) so we can
  // retire any active alloc buffers.
  pss.retire_alloc_buffers();
  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");

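  // os::elapsedTime() returns seconds; phase times are recorded in ms.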
  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}

// Weak Reference processing during an evacuation pause (part 2).
void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  double ref_enq_start = os::elapsedTime();

  ReferenceProcessor* rp = _ref_processor_stw;
  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");

  // Now enqueue any remaining references on the discovered lists onto
  // the pending list.
  if (!rp->processing_is_mt()) {
    // Serial reference enqueueing...
    rp->enqueue_discovered_references();
  } else {
    // Parallel reference enqueueing