
src/hotspot/share/gc/g1/g1OopClosures.inline.hpp

rev 49826 : imported patch 6672778-partial-queue-trimming
rev 49827 : imported patch 6672778-refactoring


  50   Prefetch::read(obj->mark_addr_raw(), (HeapWordSize*2));
  51 
  52   // Slightly paranoid test: try to catch potential problems
  53   // before we go into push_on_queue, so we know where any
  54   // problem is coming from.
  55   assert((obj == RawAccess<>::oop_load(p)) ||
  56          (obj->is_forwarded() &&
  57          obj->forwardee() == RawAccess<>::oop_load(p)),
  58          "p should still be pointing to obj or to its forwardee");
  59 
  60   _par_scan_state->push_on_queue(p);
  61 }
  62 
  63 template <class T>
  64 inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  65   if (state.is_humongous()) {
  66     _g1h->set_humongous_is_live(obj);
  67   }
  68 }
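
For context, set_humongous_is_live() feeds G1's eager reclaim of humongous regions: any reference to a humongous object discovered during the pause marks it live and disqualifies its region from reclamation at pause end. A minimal sketch of the idea, assuming a candidate table; addr_to_region() and the _humongous_reclaim_candidates accessors below are illustrative, not code shown in this file:

    // Sketch (assumed names): a humongous region with a discovered
    // reference is live, so it leaves the eager-reclaim candidate set.
    inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
      uint region = addr_to_region((HeapWord*)obj);
      if (_humongous_reclaim_candidates.is_candidate(region)) {
        _humongous_reclaim_candidates.clear_candidate(region);
      }
    }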
  69 




  70 template <class T>
  71 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  72   T heap_oop = RawAccess<>::oop_load(p);
  73 
  74   if (CompressedOops::is_null(heap_oop)) {
  75     return;
  76   }
  77   oop obj = CompressedOops::decode_not_null(heap_oop);
  78   const InCSetState state = _g1h->in_cset_state(obj);
  79   if (state.is_in_cset()) {
  80     prefetch_and_push(p, obj);
  81   } else {
  82     if (HeapRegion::is_in_same_region(p, obj)) {
  83       return;
  84     }
  85     handle_non_cset_obj_common(state, p, obj);
  86     _par_scan_state->update_rs(_from, p, obj);
  87   }
  88 }
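
G1ScanEvacuatedObjClosure is applied to every reference field of a just-copied object. A hedged usage sketch; the driver function below is hypothetical, the real call sites live in the per-thread scan state:

    // Hypothetical driver: scan the fields of a newly evacuated object.
    // oop_iterate() invokes do_oop_nv(p) for each reference field p, which
    // pushes collection-set references and records cross-region ones.
    void scan_evacuated_obj(oop new_obj, G1ScanEvacuatedObjClosure* cl) {
      new_obj->oop_iterate(cl);
    }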
  89 


 208 
 209   // We know that the object is not moving, so it's safe to read its size.
 210   _cm->mark_in_next_bitmap(_worker_id, obj);
 211 }
 212 
 213 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 214   assert(from_obj->is_forwarded(), "from obj should be forwarded");
 215   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
 216   assert(from_obj != to_obj, "should not be self-forwarded");
 217 
 218   assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
 219   assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 220 
 221   // The object might be in the process of being copied by another
 222   // worker, so we cannot trust that its to-space image is
 223   // well-formed. Instead we read its size from its from-space
 224   // image, which we know is not changing.
 225   _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
 226 }
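
Spelled out, the hazard the comment describes: to_obj may still be mid-copy, so even its header can be torn; the from-space image stops changing once the forwarding pointer is installed. A condensed illustration (not code from this file):

    // UNSAFE: another worker may still be writing to_obj's copy, so its
    // header (and therefore size()) may not be well-formed yet:
    //   size_t words = to_obj->size();
    // SAFE: the from-space image is stable once obj is forwarded:
    size_t words = from_obj->size();
    _cm->mark_in_next_bitmap(_worker_id, to_obj, words);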
 227 




 228 template <G1Barrier barrier, G1Mark do_mark_object>
 229 template <class T>
 230 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 231   T heap_oop = RawAccess<>::oop_load(p);
 232 
 233   if (CompressedOops::is_null(heap_oop)) {
 234     return;
 235   }
 236 
 237   oop obj = CompressedOops::decode_not_null(heap_oop);
 238 
 239   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 240 
 241   const InCSetState state = _g1h->in_cset_state(obj);
 242   if (state.is_in_cset()) {
 243     oop forwardee;
 244     markOop m = obj->mark_raw();
 245     if (m->is_marked()) {
 246       forwardee = (oop) m->decode_pointer();
 247     } else {


 252     if (do_mark_object != G1MarkNone && forwardee != obj) {
 253       // If the object is self-forwarded, we don't need to mark it
 254       // explicitly; the evacuation failure protocol will do so.
 255       mark_forwarded_object(obj, forwardee);
 256     }
 257 
 258     if (barrier == G1BarrierCLD) {
 259       do_cld_barrier(forwardee);
 260     }
 261   } else {
 262     if (state.is_humongous()) {
 263       _g1h->set_humongous_is_live(obj);
 264     }
 265 
 266     // The object is not in the collection set. If we're a root scanning
 267     // closure during an initial mark pause, then attempt to mark the object.
 268     if (do_mark_object == G1MarkFromRoot) {
 269       mark_object(obj);
 270     }
 271   }

 272 }
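
The barrier and marking policy are template parameters, so each combination compiles to a specialized closure with the untaken branches folded away. Plausible instantiations are sketched below; these typedef names are assumptions for illustration, not necessarily the ones in g1OopClosures.hpp:

    // Illustrative instantiations (names are assumptions):
    typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone>     G1ScanRootClosure;
    typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> G1InitialMarkRootClosure;
    typedef G1ParCopyClosure<G1BarrierCLD,  G1MarkFromRoot> G1InitialMarkCLDClosure;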
 273 
 274 template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
 275   oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
 276   if (obj == NULL) {
 277     return;
 278   }
 279 
 280   if (HeapRegion::is_in_same_region(p, obj)) {
 281     return;
 282   }
 283 
 284   HeapRegion* to = _g1h->heap_region_containing(obj);
 285   HeapRegionRemSet* rem_set = to->rem_set();
 286   rem_set->add_reference(p, _worker_id);
 287 }
 288 
 289 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP


  50   Prefetch::read(obj->mark_addr_raw(), (HeapWordSize*2));
  51 
  52   // Slightly paranoid test: try to catch potential problems
  53   // before we go into push_on_queue, so we know where any
  54   // problem is coming from.
  55   assert((obj == RawAccess<>::oop_load(p)) ||
  56          (obj->is_forwarded() &&
  57          obj->forwardee() == RawAccess<>::oop_load(p)),
  58          "p should still be pointing to obj or to its forwardee");
  59 
  60   _par_scan_state->push_on_queue(p);
  61 }
  62 
  63 template <class T>
  64 inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  65   if (state.is_humongous()) {
  66     _g1h->set_humongous_is_live(obj);
  67   }
  68 }
  69 
  70 inline void G1ScanClosureBase::trim_queue_partially() {
  71   _par_scan_state->trim_queue_partially();
  72 }
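
The closure only delegates here; the trimming itself lives in G1ParScanThreadState, which is not part of this file. A minimal sketch of the intended behavior, assuming a drain-to-threshold policy; the helper name and the GCDrainStackTargetSize flag are assumptions based on the patch description:

    // Assumed shape of the delegate (illustrative): drain the per-thread
    // task queue down to a target size, so queue memory stays bounded
    // during scanning instead of being drained only at phase end.
    inline void G1ParScanThreadState::trim_queue_partially() {
      trim_queue_to_threshold(GCDrainStackTargetSize);
    }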
  73 
  74 template <class T>
  75 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  76   T heap_oop = RawAccess<>::oop_load(p);
  77 
  78   if (CompressedOops::is_null(heap_oop)) {
  79     return;
  80   }
  81   oop obj = CompressedOops::decode_not_null(heap_oop);
  82   const InCSetState state = _g1h->in_cset_state(obj);
  83   if (state.is_in_cset()) {
  84     prefetch_and_push(p, obj);
  85   } else {
  86     if (HeapRegion::is_in_same_region(p, obj)) {
  87       return;
  88     }
  89     handle_non_cset_obj_common(state, p, obj);
  90     _par_scan_state->update_rs(_from, p, obj);
  91   }
  92 }
  93 


 212 
 213   // We know that the object is not moving, so it's safe to read its size.
 214   _cm->mark_in_next_bitmap(_worker_id, obj);
 215 }
 216 
 217 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 218   assert(from_obj->is_forwarded(), "from obj should be forwarded");
 219   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
 220   assert(from_obj != to_obj, "should not be self-forwarded");
 221 
 222   assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
 223   assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 224 
 225   // The object might be in the process of being copied by another
 226   // worker, so we cannot trust that its to-space image is
 227   // well-formed. Instead we read its size from its from-space
 228   // image, which we know is not changing.
 229   _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
 230 }
 231 
 232 void G1ParCopyHelper::trim_queue_partially() {
 233   _par_scan_state->trim_queue_partially();
 234 }
 235 
 236 template <G1Barrier barrier, G1Mark do_mark_object>
 237 template <class T>
 238 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 239   T heap_oop = RawAccess<>::oop_load(p);
 240 
 241   if (CompressedOops::is_null(heap_oop)) {
 242     return;
 243   }
 244 
 245   oop obj = CompressedOops::decode_not_null(heap_oop);
 246 
 247   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 248 
 249   const InCSetState state = _g1h->in_cset_state(obj);
 250   if (state.is_in_cset()) {
 251     oop forwardee;
 252     markOop m = obj->mark_raw();
 253     if (m->is_marked()) {
 254       forwardee = (oop) m->decode_pointer();
 255     } else {


 260     if (do_mark_object != G1MarkNone && forwardee != obj) {
 261       // If the object is self-forwarded, we don't need to mark it
 262       // explicitly; the evacuation failure protocol will do so.
 263       mark_forwarded_object(obj, forwardee);
 264     }
 265 
 266     if (barrier == G1BarrierCLD) {
 267       do_cld_barrier(forwardee);
 268     }
 269   } else {
 270     if (state.is_humongous()) {
 271       _g1h->set_humongous_is_live(obj);
 272     }
 273 
 274     // The object is not in the collection set. If we're a root scanning
 275     // closure during an initial mark pause, then attempt to mark the object.
 276     if (do_mark_object == G1MarkFromRoot) {
 277       mark_object(obj);
 278     }
 279   }
 280   trim_queue_partially();
 281 }
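
Note the placement: trim_queue_partially() runs after each reference this closure processes, so every evacuation that pushes new work is immediately followed by a chance to drain back to the threshold. A hedged sketch of the drain loop behind it; all names below are assumptions, the real code would live in g1ParScanThreadState.inline.hpp:

    // Illustrative drain loop (assumed): pop and process local tasks while
    // the queue is above the target size. Processing a task may push more
    // work, so the size is re-checked every iteration.
    void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
      StarTask ref;
      while (_refs->size() > threshold && _refs->pop_local(ref)) {
        dispatch_reference(ref);
      }
    }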
 282 
 283 template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
 284   oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
 285   if (obj == NULL) {
 286     return;
 287   }
 288 
 289   if (HeapRegion::is_in_same_region(p, obj)) {
 290     return;
 291   }
 292 
 293   HeapRegion* to = _g1h->heap_region_containing(obj);
 294   HeapRegionRemSet* rem_set = to->rem_set();
 295   rem_set->add_reference(p, _worker_id);
 296 }
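
G1RebuildRemSetClosure records cross-region references while remembered sets are rebuilt; the MO_VOLATILE load above guards against reading a reference that a mutator is concurrently updating. A hedged usage sketch; the driver below is hypothetical, the real walk is done by the rebuild task:

    // Hypothetical driver: visit a live object during remset rebuild and
    // record each cross-region reference in the target region's remset.
    void rebuild_rem_set_for(oop obj, G1CollectedHeap* g1h, uint worker_id) {
      G1RebuildRemSetClosure cl(g1h, worker_id);
      obj->oop_iterate(&cl);
    }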
 297 
 298 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP