  // Copying keep alive closure. Applied to referent objects that need
  // to be copied.
  G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);

  ReferenceProcessor* rp = _g1h->ref_processor_cm();

  uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
  uint stride = MIN2(MAX2(_n_workers, 1U), limit);

  // limit is set using max_num_q() - which was set using ParallelGCThreads.
  // So this must be true - but assert just in case someone decides to
  // change the worker ids.
  assert(0 <= worker_id && worker_id < limit, "sanity");
  assert(!rp->discovery_is_atomic(), "check this code");
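
  // Example (illustrative numbers, not taken from this code): with
  // ParallelGCThreads == 8 and the four java.lang.ref.Reference
  // subclasses tracked here (Soft, Weak, Final, Phantom),
  // limit == 4 * 8 == 32 and stride == 8, so worker 3 scans the
  // discovered lists at indices 3, 11, 19 and 27.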

  // Select discovered lists [i, i+stride, i+2*stride, ..., limit)
  for (uint idx = worker_id; idx < limit; idx += stride) {
    DiscoveredList& ref_list = rp->discovered_refs()[idx];

    DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive,
                                false /* discovered_list_needs_barrier */);
    while (iter.has_next()) {
      // Since discovery is not atomic for the CM ref processor, we
      // can see some null referent objects.
      iter.load_ptrs(DEBUG_ONLY(true));
      oop ref = iter.obj();

      // This will filter nulls.
      if (iter.is_referent_alive()) {
        iter.make_referent_alive();
      }
      iter.move_to_next();
    }
  }

  // Drain the queue - which may cause stealing.
  G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
  drain_queue.do_void();
  // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure.
  assert(pss.refs()->is_empty(), "should be");
}
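
For readers outside HotSpot: the loop above is a plain strided partition. Each of the limit discovered lists is handed to exactly one of the first stride workers, and the clamp stride = MIN2(MAX2(_n_workers, 1U), limit) together with the asserted bound worker_id < limit guarantees every active worker id is below stride. A minimal standalone sketch (illustrative names and constants, not HotSpot code) that checks the exactly-once property:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  int main() {
    const unsigned limit     = 32;  // e.g. 4 Reference subclasses * 8 queues
    const unsigned n_workers = 8;
    const unsigned stride    = std::min(std::max(n_workers, 1u), limit);

    // Count how often each list index is visited when every worker
    // in [0, stride) runs the strided loop from the excerpt.
    std::vector<unsigned> visits(limit, 0);
    for (unsigned worker_id = 0; worker_id < stride; worker_id++) {
      for (unsigned idx = worker_id; idx < limit; idx += stride) {
        visits[idx]++;
      }
    }
    for (unsigned v : visits) {
      assert(v == 1);  // each discovered list is scanned exactly once
    }
    return 0;
  }

Clamping stride to limit covers the case of more workers than lists: each of the first limit workers then takes exactly one list, and the sanity assert keeps any higher worker ids out of range.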