
src/hotspot/share/gc/g1/g1OopClosures.inline.hpp

rev 52719 : [mq]: 8159440-marking-of-promoted-objects-to-concurrent

Old (before the change):

 191   if (state.is_in_cset()) {
 192     prefetch_and_push(p, obj);
 193   } else if (!HeapRegion::is_in_same_region(p, obj)) {
 194     handle_non_cset_obj_common(state, p, obj);
 195   }
 196 }
 197 
 198 void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
 199   if (_g1h->heap_region_containing(new_obj)->is_young()) {
 200     _scanned_cld->record_modified_oops();
 201   }
 202 }
 203 
 204 void G1ParCopyHelper::mark_object(oop obj) {
 205   assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 206 
 207   // We know that the object is not moving, so it's safe to read its size.
 208   _cm->mark_in_next_bitmap(_worker_id, obj);
 209 }
 210 
 211 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 212   assert(from_obj->is_forwarded(), "from obj should be forwarded");
 213   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
 214   assert(from_obj != to_obj, "should not be self-forwarded");
 215 
 216   assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
 217   assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 218 
 219   // The object might be in the process of being copied by another
 220   // worker, so we cannot trust that its to-space image is
 221   // well-formed. We therefore have to read its size from its
 222   // from-space image, which we know should not be changing.
 223   _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
 224 }
 225 
 226 void G1ParCopyHelper::trim_queue_partially() {
 227   _par_scan_state->trim_queue_partially();
 228 }
 229 
 230 template <G1Barrier barrier, G1Mark do_mark_object>
 231 template <class T>
 232 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 233   T heap_oop = RawAccess<>::oop_load(p);
 234 
 235   if (CompressedOops::is_null(heap_oop)) {
 236     return;
 237   }
 238 
 239   oop obj = CompressedOops::decode_not_null(heap_oop);
 240 
 241   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 242 
 243   const InCSetState state = _g1h->in_cset_state(obj);
 244   if (state.is_in_cset()) {
 245     oop forwardee;
 246     markOop m = obj->mark_raw();
 247     if (m->is_marked()) {
 248       forwardee = (oop) m->decode_pointer();
 249     } else {
 250       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
 251     }
 252     assert(forwardee != NULL, "forwardee should not be NULL");
 253     RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
 254     if (do_mark_object != G1MarkNone && forwardee != obj) {
 255       // If the object is self-forwarded, we don't need to explicitly
 256       // mark it; the evacuation failure protocol will do so.
 257       mark_forwarded_object(obj, forwardee);
 258     }
 259 
 260     if (barrier == G1BarrierCLD) {
 261       do_cld_barrier(forwardee);
 262     }
 263   } else {
 264     if (state.is_humongous()) {
 265       _g1h->set_humongous_is_live(obj);
 266     }
 267 
 268     // The object is not in the collection set. If we're a root scanning
 269     // closure during an initial mark pause, then attempt to mark the object.
 270     if (do_mark_object == G1MarkFromRoot) {
 271       mark_object(obj);
 272     }
 273   }
 274   trim_queue_partially();
 275 }
 276 
 277 template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
 278   oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
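
The heart of do_oop_work above is the forwarding protocol: during evacuation, a mark word that reads as marked means the object has already been forwarded, so decode_pointer() yields the to-space copy; otherwise the worker calls copy_to_survivor_space and may race with other workers copying the same object. Below is a simplified, self-contained sketch of that claim-by-CAS pattern; Obj and copy_or_adopt are illustrative names, and the real protocol lives inside copy_to_survivor_space.

#include <atomic>
#include <cstddef>

// Simplified sketch of the claim-by-CAS forwarding pattern that
// do_oop_work/copy_to_survivor_space rely on. Not HotSpot code.
struct Obj {
  std::atomic<Obj*> forwardee{nullptr};  // stands in for the forwarding mark word
  size_t size_in_words;                  // stable: the from-space image does not change
};

// Called by each GC worker; exactly one copy per object wins the race.
Obj* copy_or_adopt(Obj* from) {
  if (Obj* fwd = from->forwardee.load(std::memory_order_acquire)) {
    return fwd;                          // already forwarded by another worker
  }
  // Any size or field reads must use the from-space image: a to-space copy
  // made by a racing worker may still be half-written at this point.
  Obj* copy = new Obj();
  copy->size_in_words = from->size_in_words;  // payload copy elided
  Obj* expected = nullptr;
  if (from->forwardee.compare_exchange_strong(expected, copy)) {
    return copy;                         // we installed the forwarding pointer
  }
  delete copy;                           // lost the race; adopt the winner's copy
  return expected;
}

This racing copy is also why mark_forwarded_object above reads from_obj->size() rather than to_obj->size(): only the from-space image is guaranteed to be well-formed while copies are in flight.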

New (after the change):

 191   if (state.is_in_cset()) {
 192     prefetch_and_push(p, obj);
 193   } else if (!HeapRegion::is_in_same_region(p, obj)) {
 194     handle_non_cset_obj_common(state, p, obj);
 195   }
 196 }
 197 
 198 void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
 199   if (_g1h->heap_region_containing(new_obj)->is_young()) {
 200     _scanned_cld->record_modified_oops();
 201   }
 202 }
 203 
 204 void G1ParCopyHelper::mark_object(oop obj) {
 205   assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 206 
 207   // We know that the object is not moving, so it's safe to read its size.
 208   _cm->mark_in_next_bitmap(_worker_id, obj);
 209 }
 210 
 211 void G1ParCopyHelper::trim_queue_partially() {
 212   _par_scan_state->trim_queue_partially();
 213 }
 214 
 215 template <G1Barrier barrier, G1Mark do_mark_object>
 216 template <class T>
 217 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 218   T heap_oop = RawAccess<>::oop_load(p);
 219 
 220   if (CompressedOops::is_null(heap_oop)) {
 221     return;
 222   }
 223 
 224   oop obj = CompressedOops::decode_not_null(heap_oop);
 225 
 226   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 227 
 228   const InCSetState state = _g1h->in_cset_state(obj);
 229   if (state.is_in_cset()) {
 230     oop forwardee;
 231     markOop m = obj->mark_raw();
 232     if (m->is_marked()) {
 233       forwardee = (oop) m->decode_pointer();
 234     } else {
 235       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
 236     }
 237     assert(forwardee != NULL, "forwardee should not be NULL");
 238     RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
 239 
 240     if (barrier == G1BarrierCLD) {
 241       do_cld_barrier(forwardee);
 242     }
 243   } else {
 244     if (state.is_humongous()) {
 245       _g1h->set_humongous_is_live(obj);
 246     }
 247 
 248     // The object is not in the collection set. If we're a root scanning
 249     // closure during an initial mark pause, then attempt to mark the object.
 250     if (do_mark_object == G1MarkFromRoot) {
 251       mark_object(obj);
 252     }
 253   }
 254   trim_queue_partially();
 255 }
 256 
 257 template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
 258   oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
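
Both mark_object and mark_forwarded_object in the old version funnel into _cm->mark_in_next_bitmap: at its core, marking sets a bit covering the object's start address in the next marking bitmap. Here is a minimal sketch of that idea, assuming one bit per heap word and an atomic fetch_or so parallel workers can mark concurrently; MarkBitmap and par_mark are illustrative names, not the HotSpot G1CMBitMap API.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Minimal marking-bitmap sketch: one bit per heap word, set atomically so
// parallel GC workers may mark the same object without coordination.
class MarkBitmap {
  std::atomic<uint64_t>* _bits;   // caller-provided bitmap storage
  uintptr_t _heap_base;           // lowest heap address covered
  static const size_t kHeapWordSize = sizeof(void*);

public:
  MarkBitmap(std::atomic<uint64_t>* bits, uintptr_t heap_base)
      : _bits(bits), _heap_base(heap_base) {}

  // Atomically set the mark bit for the object starting at addr. Returns
  // true if this call set the bit, false if another worker got there first.
  bool par_mark(uintptr_t addr) {
    size_t   bit  = (addr - _heap_base) / kHeapWordSize;
    uint64_t mask = uint64_t(1) << (bit % 64);
    uint64_t old  = _bits[bit / 64].fetch_or(mask, std::memory_order_relaxed);
    return (old & mask) == 0;
  }
};

The explicit size that mark_forwarded_object passes along is there so that marking never has to call size() on a possibly half-copied to-space image, as the comment in the old version explains.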
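
trim_queue_partially, invoked at the end of every do_oop_work, keeps the worker's local task queue bounded by draining it down to a threshold before returning, rather than letting pushes from prefetch_and_push and object copying accumulate without limit. A rough sketch of that shape, with made-up names and a fixed threshold (the real G1ParScanThreadState logic differs in detail):

#include <cstddef>
#include <vector>

// Rough sketch of partial queue trimming: after handling a reference, drain
// the local queue down to a bound so it cannot grow arbitrarily deep.
// Task, process_task and kTrimThreshold are illustrative names only.
struct Task { /* e.g. a reference location to scan */ };

class Worker {
  std::vector<Task> _queue;
  static const size_t kTrimThreshold = 64;  // illustrative bound

  void process_task(const Task& t) { /* scan t, possibly pushing more tasks */ }

public:
  void push(const Task& t) { _queue.push_back(t); }

  // Mirrors the role of G1ParCopyHelper::trim_queue_partially() above.
  void trim_queue_partially() {
    while (_queue.size() > kTrimThreshold) {
      Task t = _queue.back();
      _queue.pop_back();
      process_task(t);   // may push again; the loop re-checks the bound
    }
  }
};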