src/share/vm/gc/parallel/psPromotionManager.inline.hpp

 194 
 195         // This is the promotion failed test, and code handling.
 196         // The code belongs here for two reasons. It is slightly
 197         // different than the code below, and cannot share the
 198         // CAS testing code. Keeping the code here also minimizes
 199         // the impact on the common case fast path code.
 200 
 201         if (new_obj == NULL) {
 202           _old_gen_is_full = true;
 203           return oop_promotion_failed(o, test_mark);
 204         }
 205       }
 206     }
 207 
 208     assert(new_obj != NULL, "allocation should have succeeded");
 209 
 210     // Copy obj
 211     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
 212 
 213     // Now we have to CAS in the header.
 214     if (o->cas_forward_to(new_obj, test_mark)) {
 215       // We won any races, we "own" this object.
 216       assert(new_obj == o->forwardee(), "Sanity");
 217 
 218       // Increment age if obj still in new generation. Now that
 219       // we're dealing with a markOop that cannot change, it is
 220       // okay to use the non mt safe oop methods.
 221       if (!new_obj_is_tenured) {
 222         new_obj->incr_age();
 223         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
 224       }
 225 
 226       // Do the size comparison first with new_obj_size, which we
 227       // already have. Hopefully, only a few objects are larger than
 228       // _min_array_size_for_chunking, and most of them will be arrays.
 229       // So, the is_objArray() test would be very infrequent.
 230       if (new_obj_size > _min_array_size_for_chunking &&
 231           new_obj->is_objArray() &&
 232           PSChunkLargeArrays) {
 233         // we'll chunk it
 234         oop* const masked_o = mask_chunked_array_oop(o);
 235         push_depth(masked_o);
 236         TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
 237       } else {
 238         // we'll just push its contents
 239         push_contents(new_obj);
 240       }
 241     }  else {
 242       // We lost, someone else "owns" this object
 243       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 244 
 245       // Try to deallocate the space.  If it was directly allocated we cannot
 246       // deallocate it, so we have to test.  If the deallocation fails,
 247       // overwrite with a filler object.
 248       if (new_obj_is_tenured) {
 249         if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 250           CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 251         }
 252       } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 253         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 254       }
 255 
 256       // don't update this before the unallocation!
 257       new_obj = o->forwardee();
 258     }
 259   } else {
 260     assert(o->is_forwarded(), "Sanity");
 261     new_obj = o->forwardee();
 262   }
 263 
 264   // This code must come after the CAS test, or it will print incorrect
 265   // information.
 266   log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 267                                   should_scavenge(&new_obj) ? "copying" : "tenuring",
 268                                   new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
 269 
 270   return new_obj;
 271 }
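
// Illustrative sketch, not HotSpot code: the "unallocate or overwrite with a
// filler" handling in copy_to_survivor_space above assumes a bump-pointer lab
// in which only the most recent allocation can be taken back. All names below
// (ToyLab, etc.) are hypothetical.
#include <cstddef>

struct ToyLab {
  char* _top;   // next free byte
  char* _end;   // end of the lab

  void* allocate(size_t bytes) {
    if ((size_t)(_end - _top) < bytes) return nullptr;   // lab exhausted
    void* obj = _top;
    _top += bytes;
    return obj;
  }

  // Succeeds only when obj is the last allocation; otherwise the dead copy
  // stays behind and the caller must overwrite it with a filler object.
  bool unallocate_object(void* obj, size_t bytes) {
    if ((char*)obj + bytes == _top) {
      _top = (char*)obj;
      return true;
    }
    return false;
  }
};
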
 272 
 273 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 274 // This version tests the oop* to make sure it is within the heap before
 275 // attempting marking.
 276 template <class T, bool promote_immediately>
 277 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
 278   assert(should_scavenge(p, true), "revisiting object?");
 279 
 280   oop o = oopDesc::load_decode_heap_oop_not_null(p);
 281   oop new_obj = o->is_forwarded()
 282         ? o->forwardee()
 283         : copy_to_survivor_space<promote_immediately>(o);
 284 
 285   // This code must come after the CAS test, or it will print incorrect
 286   // information.
 287   if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
 288     log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 289                       "forwarding",
 290                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
 291   }
 292 
 293   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 294 
 295   // We cannot mark without test, as some code passes us pointers
 296   // that are outside the heap. These pointers are either from roots
 297   // or from metadata.
 298   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
 299       ParallelScavengeHeap::heap()->is_in_reserved(p)) {
 300     if (PSScavenge::is_obj_in_young(new_obj)) {
 301       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
 302     }
 303   }
 304 }
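
// Illustrative sketch, not HotSpot code: the barrier at the end of
// copy_and_push_safe_barrier above dirties a card only when a field outside the
// young generation ends up pointing at a young object; young-gen fields are
// rescanned by the next scavenge anyway, and the is_in_reserved() test (omitted
// here) filters out root and metadata locations outside the heap. The card size
// and all names below are hypothetical.
#include <cstddef>
#include <cstdint>

struct ToyCardTable {
  uint8_t* _cards;         // one byte per card
  char*    _covered_base;  // start of the covered region
  static const size_t card_shift = 9;   // 512-byte cards in this sketch

  void write_ref_field_gc(void** field, void* new_value,
                          bool field_is_in_young, bool value_is_in_young) {
    *field = new_value;
    if (!field_is_in_young && value_is_in_young) {
      size_t index = (size_t)((char*)field - _covered_base) >> card_shift;
      _cards[index] = 0;   // 0 == dirty in this sketch
    }
  }
};
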
 305 
 306 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
 307   if (is_oop_masked(p)) {
 308     assert(PSChunkLargeArrays, "invariant");
 309     oop const old = unmask_chunked_array_oop(p);
 310     process_array_chunk(old);




 194 
 195         // This is the promotion failed test, and code handling.
 196         // The code belongs here for two reasons. It is slightly
 197         // different than the code below, and cannot share the
 198         // CAS testing code. Keeping the code here also minimizes
 199         // the impact on the common case fast path code.
 200 
 201         if (new_obj == NULL) {
 202           _old_gen_is_full = true;
 203           return oop_promotion_failed(o, test_mark);
 204         }
 205       }
 206     }
 207 
 208     assert(new_obj != NULL, "allocation should have succeeded");
 209 
 210     // Copy obj
 211     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
 212 
 213     // Now we have to CAS in the header.
 214     if (o->cas_forward_to(new_obj, test_mark, memory_order_relaxed)) {
 215       // We won any races, we "own" this object.
 216       assert(new_obj == o->forwardee(), "Sanity");
 217 
 218       // Increment age if obj still in new generation. Now that
 219       // we're dealing with a markOop that cannot change, it is
 220       // okay to use the non mt safe oop methods.
 221       if (!new_obj_is_tenured) {
 222         new_obj->incr_age();
 223         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
 224       }
 225 
 226       // Do the size comparison first with new_obj_size, which we
 227       // already have. Hopefully, only a few objects are larger than
 228       // _min_array_size_for_chunking, and most of them will be arrays.
 229       // So, the is_objArray() test would be very infrequent.
 230       if (new_obj_size > _min_array_size_for_chunking &&
 231           new_obj->is_objArray() &&
 232           PSChunkLargeArrays) {
 233         // we'll chunk it
 234         oop* const masked_o = mask_chunked_array_oop(o);
 235         push_depth(masked_o);
 236         TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
 237       } else {
 238         // we'll just push its contents
 239         push_contents(new_obj);
 240       }
 241 
 242       // This code must come after the CAS test, or it will print incorrect
 243       // information.
 244       if (log_develop_is_enabled(Trace, gc, scavenge)) {
 245         log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 246                                         should_scavenge(&new_obj) ? "copying" : "tenuring",
 247                                         new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj),
 248                                         new_obj->size());
 249       }
 250     }  else {
 251       // We lost, someone else "owns" this object
 252       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 253 
 254       // Try to deallocate the space.  If it was directly allocated we cannot
 255       // deallocate it, so we have to test.  If the deallocation fails,
 256       // overwrite with a filler object.
 257       if (new_obj_is_tenured) {
 258         if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 259           CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 260         }
 261       } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 262         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 263       }
 264 
 265       // don't update this before the unallocation!
 266       new_obj = o->forwardee();
 267 
 268       // fields in new_obj may not be synchronized.
 269       if (log_develop_is_enabled(Trace, gc, scavenge)) {
 270         log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 271                                         should_scavenge(&new_obj) ? "copying" : "tenuring",
 272                                         o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 273       }
 274       new_obj = NULL;
 275     }
 276   } else {
 277     assert(o->is_forwarded(), "Sanity");
 278     new_obj = o->forwardee();
 279     // fields in new_obj may not be synchronized.
 280     if (log_develop_is_enabled(Trace, gc, scavenge)) {
 281       log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 282                                       should_scavenge(&new_obj) ? "copying" : "tenuring",
 283                                       o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 284     }
 285     new_obj = NULL;
 286   }
 287 
 288   return new_obj;
 289 }
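
// Illustrative sketch, not HotSpot code: the claim-by-CAS race in
// copy_to_survivor_space above, reduced to std::atomic. Each thread copies
// first and then tries to install its copy as the forwardee; the loser discards
// its private copy and re-reads the winner's. With a relaxed CAS the loser has
// no ordering guarantee that the winner's copied payload is visible yet, which
// is presumably why the lost-race path above no longer hands out new_obj. All
// names below are hypothetical.
#include <atomic>
#include <cstring>

struct ToyObj {
  std::atomic<ToyObj*> forwardee{nullptr};
  char payload[48];
};

ToyObj* toy_claim_and_copy(ToyObj* from, ToyObj* my_copy) {
  std::memcpy(my_copy->payload, from->payload, sizeof(my_copy->payload));
  ToyObj* expected = nullptr;
  if (from->forwardee.compare_exchange_strong(expected, my_copy,
                                              std::memory_order_relaxed)) {
    return my_copy;                                           // we won the race
  }
  return from->forwardee.load(std::memory_order_relaxed);     // winner's copy
}
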
 290 
 291 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 292 // This version tests the oop* to make sure it is within the heap before
 293 // attempting marking.
 294 template <class T, bool promote_immediately>
 295 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
 296   assert(should_scavenge(p, true), "revisiting object?");
 297 
 298   oop o = oopDesc::load_decode_heap_oop_not_null(p);
 299 
 300   if (!o->is_forwarded()) {
 301     copy_to_survivor_space<promote_immediately>(o);
 302   }
 303   oop new_obj = o->forwardee();
 304   assert(new_obj != NULL, "forwardee should not be NULL");
 305   // This code must come after the CAS test, or it will print incorrect information.
 306   if (log_develop_is_enabled(Trace, gc, scavenge)) {
 307     log_develop_trace(gc, scavenge)("{forwarding %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 308                       o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 309   }
 310 
 311   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 312 
 313   // We cannot mark without test, as some code passes us pointers
 314   // that are outside the heap. These pointers are either from roots
 315   // or from metadata.
 316   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
 317       ParallelScavengeHeap::heap()->is_in_reserved(p)) {
 318     if (PSScavenge::is_obj_in_young(new_obj)) {
 319       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
 320     }
 321   }
 322 }
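
// Illustrative sketch, not HotSpot code: copy_to_survivor_space above pushes a
// "chunked array" task onto the same depth queue that otherwise holds ordinary
// oop* entries by tagging the pointer with a low bit, which
// process_popped_location_depth below tests and strips again. The particular
// bit and the names are hypothetical.
#include <cstdint>

const uintptr_t chunk_tag = 0x1;   // pointers are at least 2-byte aligned

inline void* mask_chunked(void* p)   { return (void*)((uintptr_t)p |  chunk_tag); }
inline void* unmask_chunked(void* p) { return (void*)((uintptr_t)p & ~chunk_tag); }
inline bool  is_masked(void* p)      { return ((uintptr_t)p & chunk_tag) != 0; }
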
 323 
 324 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
 325   if (is_oop_masked(p)) {
 326     assert(PSChunkLargeArrays, "invariant");
 327     oop const old = unmask_chunked_array_oop(p);
 328     process_array_chunk(old);