
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7793 : 8073315: Enable gcc -Wtype-limits and fix upcoming issues.
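For context, here is a minimal standalone sketch (not JDK code) of the class of warning that -Wtype-limits raises and that this change silences. The two-argument HotSpot assert is approximated with a hypothetical hs_assert macro, and the uint typedef mirrors HotSpot's:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for HotSpot's two-argument assert macro.
    #define hs_assert(cond, msg) \
      do { if (!(cond)) { std::fprintf(stderr, "assert failed: %s\n", msg); std::abort(); } } while (0)

    typedef unsigned int uint;  // mirrors HotSpot's uint

    void old_check(uint worker_id, uint limit) {
      // gcc -Wtype-limits flags the first clause: a comparison like
      // "0 <= unsigned_value" is always true, so the test is dead code.
      hs_assert(0 <= worker_id && worker_id < limit, "sanity");
    }

    void new_check(uint worker_id, uint limit) {
      // The patched form keeps only the meaningful upper-bound check.
      hs_assert(worker_id < limit, "sanity");
    }

    int main() {
      new_check(3, 10);  // passes: 3 < 10
      return 0;
    }

Compiled with gcc -Wtype-limits, old_check draws a warning on the tautological lower-bound test; new_check is clean.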

Old version (before the fix):
5403     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5404       // We also need to mark copied objects.
5405       copy_non_heap_cl = &copy_mark_non_heap_cl;
5406     }
5407 
5408     // Is alive closure
5409     G1AlwaysAliveClosure always_alive(_g1h);
5410 
5411     // Copying keep alive closure. Applied to referent objects that need
5412     // to be copied.
5413     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5414 
5415     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5416 
5417     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5418     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5419 
5420     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5421     // So this must be true - but assert just in case someone decides to
5422     // change the worker ids.
5423     assert(0 <= worker_id && worker_id < limit, "sanity");
5424     assert(!rp->discovery_is_atomic(), "check this code");
5425 
5426     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5427     for (uint idx = worker_id; idx < limit; idx += stride) {
5428       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5429 
5430       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5431       while (iter.has_next()) {
5432         // Since discovery is not atomic for the CM ref processor, we
5433         // can see some null referent objects.
5434         iter.load_ptrs(DEBUG_ONLY(true));
5435         oop ref = iter.obj();
5436 
5437         // This will filter nulls.
5438         if (iter.is_referent_alive()) {
5439           iter.make_referent_alive();
5440         }
5441         iter.move_to_next();
5442       }
5443     }

New version (after the fix; the always-true "0 <= worker_id" test on the unsigned worker_id is dropped from the assert at line 5423):

5403     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5404       // We also need to mark copied objects.
5405       copy_non_heap_cl = &copy_mark_non_heap_cl;
5406     }
5407 
5408     // Is alive closure
5409     G1AlwaysAliveClosure always_alive(_g1h);
5410 
5411     // Copying keep alive closure. Applied to referent objects that need
5412     // to be copied.
5413     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5414 
5415     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5416 
5417     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5418     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5419 
5420     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5421     // So this must be true - but assert just in case someone decides to
5422     // change the worker ids.
5423     assert(worker_id < limit, "sanity");
5424     assert(!rp->discovery_is_atomic(), "check this code");
5425 
5426     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5427     for (uint idx = worker_id; idx < limit; idx += stride) {
5428       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5429 
5430       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5431       while (iter.has_next()) {
5432         // Since discovery is not atomic for the CM ref processor, we
5433         // can see some null referent objects.
5434         iter.load_ptrs(DEBUG_ONLY(true));
5435         oop ref = iter.obj();
5436 
5437         // This will filter nulls.
5438         if (iter.is_referent_alive()) {
5439           iter.make_referent_alive();
5440         }
5441         iter.move_to_next();
5442       }
5443     }
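The selection loop itself may be easier to see in isolation. Below is a standalone sketch, assuming hypothetical fixed values (4 workers, 10 lists) rather than the real ReferenceProcessor machinery, of how each worker strides over the discovered-list indices so that every list in [0, limit) is claimed exactly once:

    #include <cstdio>
    #include <algorithm>

    typedef unsigned int uint;

    int main() {
      // Hypothetical values: 4 workers, 10 discovered lists
      // (number_of_subclasses_of_ref() * max_num_q() in the real code).
      const uint n_workers = 4;
      const uint limit     = 10;
      // Mirrors MIN2(MAX2(_n_workers, 1U), limit) from the code above.
      const uint stride    = std::min(std::max(n_workers, 1U), limit);

      for (uint worker_id = 0; worker_id < n_workers; worker_id++) {
        std::printf("worker %u claims lists:", worker_id);
        // Same selection as the loop at line 5427: indices
        // worker_id, worker_id + stride, worker_id + 2*stride, ... < limit.
        for (uint idx = worker_id; idx < limit; idx += stride) {
          std::printf(" %u", idx);
        }
        std::printf("\n");
      }
      return 0;
    }

With these numbers every index 0..9 is printed exactly once, which is why the assert at line 5423 matters: a worker_id at or beyond limit would claim no lists at all.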

