4617 assert(_g1h->obj_in_cs(p),
4618 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4619 } else {
4620 oop p = oopDesc::load_decode_heap_oop(ref);
4621 assert(_g1h->is_in_g1_reserved(p),
4622 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4623 }
4624 return true;
4625 }
4626
4627 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4628 if (ref.is_narrow()) {
4629 return verify_ref((narrowOop*) ref);
4630 } else {
4631 return verify_ref((oop*) ref);
4632 }
4633 }
4634 #endif // ASSERT
4635
// Fully drain this worker's scan-task queue set: alternately empty the
// overflow stack and the local queue until both are empty. The outer loop
// re-checks emptiness because deal_with_reference() can push new tasks
// onto either structure while we drain.
4636 void G1ParScanThreadState::trim_queue() {
// The per-task closures must be installed before any task is processed.
4637 assert(_evac_cl != NULL, "not set");
4638 assert(_evac_failure_cl != NULL, "not set");
4639 assert(_partial_scan_cl != NULL, "not set");
4640
4641 StarTask ref;
4642 do {
4643 // Drain the overflow stack first, so other threads can steal.
4644 while (refs()->pop_overflow(ref)) {
4645 deal_with_reference(ref);
4646 }
4647
4648 while (refs()->pop_local(ref)) {
4649 deal_with_reference(ref);
4650 }
// Processing a reference may have enqueued more work; loop until the
// whole queue set is observed empty.
4651 } while (!refs()->is_empty());
4652 }
4653
// Common closure setup: cache the heap, the per-worker scan state, and the
// worker id (taken from the scan state's queue number).
4654 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4655 G1ParScanThreadState* par_scan_state) :
4656 _g1(g1), _par_scan_state(par_scan_state),
4657 _worker_id(par_scan_state->queue_num()) { }
4658
4659 void G1ParCopyHelper::mark_object(oop obj) {
4815 } else {
4816 // The object is not in collection set. If we're a root scanning
4817 // closure during an initial mark pause (i.e. do_mark_object will
4818 // be true) then attempt to mark the object.
4819 if (do_mark_object) {
4820 assert(_g1->is_in_g1_reserved(obj), "Must reference an object within the heap");
4821 mark_object(obj);
4822 }
4823 }
4824
4825 if (barrier == G1BarrierEvac) {
4826 assert(obj != NULL, "must be");
4827 _par_scan_state->update_rs(_from, p, _worker_id);
4828 }
4829 }
4830 }
4831
// Explicit instantiations of the evacuation-barrier copy closure for both
// reference widths (uncompressed oop* and compressed narrowOop*).
4832 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4833 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4834
// Process one chunk of a large object array being scanned incrementally.
// The task pointer p encodes the from-space array via a partial-array mask;
// the to-space copy's length field is (ab)used to record the next chunk's
// start index. If more than 2*ParGCArrayScanChunk elements remain, a
// follow-up task for the remainder is pushed (stealable by other workers)
// before this chunk is scanned.
4835 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4836 assert(has_partial_array_mask(p), "invariant");
4837 oop from_obj = clear_partial_array_mask(p);
4838
4839 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4840 assert(from_obj->is_objArray(), "must be obj array");
4841 objArrayOop from_obj_array = objArrayOop(from_obj);
4842 // The from-space object contains the real length.
4843 int length = from_obj_array->length();
4844
4845 assert(from_obj->is_forwarded(), "must be forwarded");
4846 oop to_obj = from_obj->forwardee();
4847 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4848 objArrayOop to_obj_array = objArrayOop(to_obj);
4849 // We keep track of the next start index in the length field of the
4850 // to-space object.
4851 int next_index = to_obj_array->length();
4852 assert(0 <= next_index && next_index < length,
4853 err_msg("invariant, next index: %d, length: %d", next_index, length));
4854
4855 int start = next_index;
4856 int end = length;
4857 int remainder = end - start;
4858 // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4859 if (remainder > 2 * ParGCArrayScanChunk) {
4860 end = start + ParGCArrayScanChunk;
4861 to_obj_array->set_length(end);
4862 // Push the remainder before we process the range in case another
4863 // worker has run out of things to do and can steal it.
4864 oop* from_obj_p = set_partial_array_mask(from_obj);
4865 _par_scan_state->push_on_queue(from_obj_p);
4866 } else {
4867 assert(length == end, "sanity");
4868 // We'll process the final range for this object. Restore the length
4869 // so that the heap remains parsable in case of evacuation failure.
4870 to_obj_array->set_length(end);
4871 }
4872 _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4873 // Process indexes [start,end). It will also process the header
4874 // along with the first chunk (i.e., the chunk with start == 0).
4875 // Note that at this point the length field of to_obj_array is not
4876 // correct given that we are using it to keep track of the next
4877 // start index. oop_iterate_range() (thankfully!) ignores the length
4878 // field and only relies on the start / end parameters. It does
4879 // however return the size of the object which will be incorrect. So
4880 // we have to ignore it even if we wanted to use it.
4881 to_obj_array->oop_iterate_range(&_scanner, start, end);
4882 }
4883
4884 class G1ParEvacuateFollowersClosure : public VoidClosure {
4885 protected:
4886 G1CollectedHeap* _g1h;
4887 G1ParScanThreadState* _par_scan_state;
4888 RefToScanQueueSet* _queues;
4889 ParallelTaskTerminator* _terminator;
4890
4891 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4892 RefToScanQueueSet* queues() { return _queues; }
4893 ParallelTaskTerminator* terminator() { return _terminator; }
4894
4895 public:
4896 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4897 G1ParScanThreadState* par_scan_state,
4898 RefToScanQueueSet* queues,
4899 ParallelTaskTerminator* terminator)
4900 : _g1h(g1h), _par_scan_state(par_scan_state),
4901 _queues(queues), _terminator(terminator) {}
4902
4903 void do_void();
5005 // both of which need setting by set_n_termination().
5006 _g1h->SharedHeap::set_n_termination(active_workers);
5007 _g1h->set_n_termination(active_workers);
5008 terminator()->reset_for_reuse(active_workers);
5009 _n_workers = active_workers;
5010 }
5011
5012 void work(uint worker_id) {
5013 if (worker_id >= _n_workers) return; // no work needed this round
5014
5015 double start_time_ms = os::elapsedTime() * 1000.0;
5016 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
5017
5018 {
5019 ResourceMark rm;
5020 HandleMark hm;
5021
5022 ReferenceProcessor* rp = _g1h->ref_processor_stw();
5023
5024 G1ParScanThreadState pss(_g1h, worker_id, rp);
5025 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
5026 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
5027 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
5028
5029 pss.set_evac_closure(&scan_evac_cl);
5030 pss.set_evac_failure_closure(&evac_failure_cl);
5031 pss.set_partial_scan_closure(&partial_scan_cl);
5032
5033 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
5034 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
5035
5036 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
5037 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
5038
5039 bool only_young = _g1h->g1_policy()->gcs_are_young();
5040 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
5041 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
5042
5043 OopClosure* scan_root_cl = &only_scan_root_cl;
5044 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
5045
5046 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5047 // We also need to mark copied objects.
5048 scan_root_cl = &scan_mark_root_cl;
5049 scan_klasses_cl = &scan_mark_klasses_cl_s;
5050 }
5051
5436 public:
// Wrap a reference-processing ProcessTask so it can be executed as an
// AbstractGangTask by the STW worker threads; records the task, heap,
// task queues, and the shared terminator.
5437 G1STWRefProcTaskProxy(ProcessTask& proc_task,
5438 G1CollectedHeap* g1h,
5439 RefToScanQueueSet *task_queues,
5440 ParallelTaskTerminator* terminator) :
5441 AbstractGangTask("Process reference objects in parallel"),
5442 _proc_task(proc_task),
5443 _g1h(g1h),
5444 _task_queues(task_queues),
5445 _terminator(terminator)
5446 {}
5447
5448 virtual void work(uint worker_id) {
5449 // The reference processing task executed by a single worker.
5450 ResourceMark rm;
5451 HandleMark hm;
5452
5453 G1STWIsAliveClosure is_alive(_g1h);
5454
5455 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5456
5457 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5458 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5459 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5460
5461 pss.set_evac_closure(&scan_evac_cl);
5462 pss.set_evac_failure_closure(&evac_failure_cl);
5463 pss.set_partial_scan_closure(&partial_scan_cl);
5464
5465 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5466 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5467
5468 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5469 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5470
5471 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5472 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5473
5474 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5475 // We also need to mark copied objects.
5476 copy_non_heap_cl = &copy_mark_non_heap_cl;
5477 copy_metadata_cl = &copy_mark_metadata_cl;
5478 }
5479
5480 // Keep alive closure.
5481 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5482
5483 // Complete GC closure
5548 protected:
5549 G1CollectedHeap* _g1h;
5550 RefToScanQueueSet *_queues;
5551 ParallelTaskTerminator _terminator;
5552 uint _n_workers;
5553
5554 public:
// Record the heap, worker count and task queue set, and size the
// terminator for `workers` participants.
5555 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5556 AbstractGangTask("ParPreserveCMReferents"),
5557 _g1h(g1h),
5558 _queues(task_queues),
5559 _terminator(workers, _queues),
5560 _n_workers(workers)
5561 { }
5562
5563 void work(uint worker_id) {
5564 ResourceMark rm;
5565 HandleMark hm;
5566
5567 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5568 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5569 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5570 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5571
5572 pss.set_evac_closure(&scan_evac_cl);
5573 pss.set_evac_failure_closure(&evac_failure_cl);
5574 pss.set_partial_scan_closure(&partial_scan_cl);
5575
5576 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5577
5578
5579 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5580 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5581
5582 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5583 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5584
5585 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5586 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5587
5588 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5589 // We also need to mark copied objects.
5590 copy_non_heap_cl = &copy_mark_non_heap_cl;
5591 copy_metadata_cl = &copy_mark_metadata_cl;
5592 }
5593
5594 // Is alive closure
5678 } else {
5679 keep_cm_referents.work(0);
5680 }
5681
5682 set_par_threads(0);
5683
5684 // Closure to test whether a referent is alive.
5685 G1STWIsAliveClosure is_alive(this);
5686
5687 // Even when parallel reference processing is enabled, the processing
5688 // of JNI refs is serial and performed serially by the current thread
5689 // rather than by a worker. The following PSS will be used for processing
5690 // JNI refs.
5691
5692 // Use only a single queue for this PSS.
5693 G1ParScanThreadState pss(this, 0, NULL);
5694
5695 // We do not embed a reference processor in the copying/scanning
5696 // closures while we're actually processing the discovered
5697 // reference objects.
5698 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
5699 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5700 G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL);
5701
5702 pss.set_evac_closure(&scan_evac_cl);
5703 pss.set_evac_failure_closure(&evac_failure_cl);
5704 pss.set_partial_scan_closure(&partial_scan_cl);
5705
5706 assert(pss.refs()->is_empty(), "pre-condition");
5707
5708 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5709 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
5710
5711 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5712 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5713
5714 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5715 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5716
5717 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5718 // We also need to mark copied objects.
5719 copy_non_heap_cl = &copy_mark_non_heap_cl;
5720 copy_metadata_cl = &copy_mark_metadata_cl;
5721 }
5722
5723 // Keep alive closure.
5724 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
|
4617 assert(_g1h->obj_in_cs(p),
4618 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4619 } else {
4620 oop p = oopDesc::load_decode_heap_oop(ref);
4621 assert(_g1h->is_in_g1_reserved(p),
4622 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
4623 }
4624 return true;
4625 }
4626
// Verify a queued task: decode the StarTask's width tag and dispatch to
// the verify_ref() overload matching the encoded pointer width.
4627 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4628 if (ref.is_narrow()) {
4629 return verify_ref((narrowOop*) ref);
4630 } else {
4631 return verify_ref((oop*) ref);
4632 }
4633 }
4634 #endif // ASSERT
4635
4636 void G1ParScanThreadState::trim_queue() {
4637 assert(_evac_failure_cl != NULL, "not set");
4638
4639 StarTask ref;
4640 do {
4641 // Drain the overflow stack first, so other threads can steal.
4642 while (refs()->pop_overflow(ref)) {
4643 deal_with_reference(ref);
4644 }
4645
4646 while (refs()->pop_local(ref)) {
4647 deal_with_reference(ref);
4648 }
4649 } while (!refs()->is_empty());
4650 }
4651
// Common closure setup: cache the heap, the per-worker scan state, and the
// worker id (taken from the scan state's queue number).
4652 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4653 G1ParScanThreadState* par_scan_state) :
4654 _g1(g1), _par_scan_state(par_scan_state),
4655 _worker_id(par_scan_state->queue_num()) { }
4656
4657 void G1ParCopyHelper::mark_object(oop obj) {
4813 } else {
4814 // The object is not in collection set. If we're a root scanning
4815 // closure during an initial mark pause (i.e. do_mark_object will
4816 // be true) then attempt to mark the object.
4817 if (do_mark_object) {
4818 assert(_g1->is_in_g1_reserved(obj), "Must reference an object within the heap");
4819 mark_object(obj);
4820 }
4821 }
4822
4823 if (barrier == G1BarrierEvac) {
4824 assert(obj != NULL, "must be");
4825 _par_scan_state->update_rs(_from, p, _worker_id);
4826 }
4827 }
4828 }
4829
// Explicit instantiations of the evacuation-barrier copy closure for both
// reference widths (uncompressed oop* and compressed narrowOop*).
4830 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
4831 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4832
4833 class G1ParEvacuateFollowersClosure : public VoidClosure {
4834 protected:
4835 G1CollectedHeap* _g1h;
4836 G1ParScanThreadState* _par_scan_state;
4837 RefToScanQueueSet* _queues;
4838 ParallelTaskTerminator* _terminator;
4839
4840 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4841 RefToScanQueueSet* queues() { return _queues; }
4842 ParallelTaskTerminator* terminator() { return _terminator; }
4843
4844 public:
4845 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4846 G1ParScanThreadState* par_scan_state,
4847 RefToScanQueueSet* queues,
4848 ParallelTaskTerminator* terminator)
4849 : _g1h(g1h), _par_scan_state(par_scan_state),
4850 _queues(queues), _terminator(terminator) {}
4851
4852 void do_void();
4954 // both of which need setting by set_n_termination().
4955 _g1h->SharedHeap::set_n_termination(active_workers);
4956 _g1h->set_n_termination(active_workers);
4957 terminator()->reset_for_reuse(active_workers);
4958 _n_workers = active_workers;
4959 }
4960
4961 void work(uint worker_id) {
4962 if (worker_id >= _n_workers) return; // no work needed this round
4963
4964 double start_time_ms = os::elapsedTime() * 1000.0;
4965 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4966
4967 {
4968 ResourceMark rm;
4969 HandleMark hm;
4970
4971 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4972
4973 G1ParScanThreadState pss(_g1h, worker_id, rp);
4974 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4975
4976 pss.set_evac_failure_closure(&evac_failure_cl);
4977
4978 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
4979 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
4980
4981 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4982 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4983
4984 bool only_young = _g1h->g1_policy()->gcs_are_young();
4985 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4986 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4987
4988 OopClosure* scan_root_cl = &only_scan_root_cl;
4989 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
4990
4991 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4992 // We also need to mark copied objects.
4993 scan_root_cl = &scan_mark_root_cl;
4994 scan_klasses_cl = &scan_mark_klasses_cl_s;
4995 }
4996
5381 public:
// Wrap a reference-processing ProcessTask so it can be executed as an
// AbstractGangTask by the STW worker threads; records the task, heap,
// task queues, and the shared terminator.
5382 G1STWRefProcTaskProxy(ProcessTask& proc_task,
5383 G1CollectedHeap* g1h,
5384 RefToScanQueueSet *task_queues,
5385 ParallelTaskTerminator* terminator) :
5386 AbstractGangTask("Process reference objects in parallel"),
5387 _proc_task(proc_task),
5388 _g1h(g1h),
5389 _task_queues(task_queues),
5390 _terminator(terminator)
5391 {}
5392
5393 virtual void work(uint worker_id) {
5394 // The reference processing task executed by a single worker.
5395 ResourceMark rm;
5396 HandleMark hm;
5397
5398 G1STWIsAliveClosure is_alive(_g1h);
5399
5400 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5401 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5402
5403 pss.set_evac_failure_closure(&evac_failure_cl);
5404
5405 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5406 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5407
5408 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5409 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5410
5411 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5412 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5413
5414 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5415 // We also need to mark copied objects.
5416 copy_non_heap_cl = &copy_mark_non_heap_cl;
5417 copy_metadata_cl = &copy_mark_metadata_cl;
5418 }
5419
5420 // Keep alive closure.
5421 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5422
5423 // Complete GC closure
5488 protected:
5489 G1CollectedHeap* _g1h;
5490 RefToScanQueueSet *_queues;
5491 ParallelTaskTerminator _terminator;
5492 uint _n_workers;
5493
5494 public:
// Record the heap, worker count and task queue set, and size the
// terminator for `workers` participants.
5495 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5496 AbstractGangTask("ParPreserveCMReferents"),
5497 _g1h(g1h),
5498 _queues(task_queues),
5499 _terminator(workers, _queues),
5500 _n_workers(workers)
5501 { }
5502
5503 void work(uint worker_id) {
5504 ResourceMark rm;
5505 HandleMark hm;
5506
5507 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5508 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5509
5510 pss.set_evac_failure_closure(&evac_failure_cl);
5511
5512 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5513
5514
5515 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5516 G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL);
5517
5518 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5519 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5520
5521 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5522 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5523
5524 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5525 // We also need to mark copied objects.
5526 copy_non_heap_cl = &copy_mark_non_heap_cl;
5527 copy_metadata_cl = &copy_mark_metadata_cl;
5528 }
5529
5530 // Is alive closure
5614 } else {
5615 keep_cm_referents.work(0);
5616 }
5617
5618 set_par_threads(0);
5619
5620 // Closure to test whether a referent is alive.
5621 G1STWIsAliveClosure is_alive(this);
5622
5623 // Even when parallel reference processing is enabled, the processing
5624 // of JNI refs is serial and performed serially by the current thread
5625 // rather than by a worker. The following PSS will be used for processing
5626 // JNI refs.
5627
5628 // Use only a single queue for this PSS.
5629 G1ParScanThreadState pss(this, 0, NULL);
5630
5631 // We do not embed a reference processor in the copying/scanning
5632 // closures while we're actually processing the discovered
5633 // reference objects.
5634 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5635
5636 pss.set_evac_failure_closure(&evac_failure_cl);
5637
5638 assert(pss.refs()->is_empty(), "pre-condition");
5639
5640 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5641 G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL);
5642
5643 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5644 G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5645
5646 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5647 OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl;
5648
5649 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5650 // We also need to mark copied objects.
5651 copy_non_heap_cl = &copy_mark_non_heap_cl;
5652 copy_metadata_cl = &copy_mark_metadata_cl;
5653 }
5654
5655 // Keep alive closure.
5656 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
|