
src/share/vm/gc/parallel/psPromotionManager.inline.hpp

 194 
 195         // This is the promotion failed test, and code handling.
 196         // The code belongs here for two reasons. It is slightly
 197         // different than the code below, and cannot share the
 198         // CAS testing code. Keeping the code here also minimizes
 199         // the impact on the common case fast path code.
 200 
 201         if (new_obj == NULL) {
 202           _old_gen_is_full = true;
 203           return oop_promotion_failed(o, test_mark);
 204         }
 205       }
 206     }
 207 
 208     assert(new_obj != NULL, "allocation should have succeeded");
 209 
 210     // Copy obj
 211     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
 212 
 213     // Now we have to CAS in the header.
 214     if (o->cas_forward_to(new_obj, test_mark)) {
 215       // We won any races, we "own" this object.
 216       assert(new_obj == o->forwardee(), "Sanity");
 217 
 218       // Increment age if obj still in new generation. Now that
 219       // we're dealing with a markOop that cannot change, it is
 220       // okay to use the non mt safe oop methods.
 221       if (!new_obj_is_tenured) {
 222         new_obj->incr_age();
 223         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
 224       }
 225 
 226       // Do the size comparison first with new_obj_size, which we
 227       // already have. Hopefully, only a few objects are larger than
 228       // _min_array_size_for_chunking, and most of them will be arrays.
 229         // So, the is_objArray() test would be very infrequent.
 230       if (new_obj_size > _min_array_size_for_chunking &&
 231           new_obj->is_objArray() &&
 232           PSChunkLargeArrays) {
 233         // we'll chunk it
 234         oop* const masked_o = mask_chunked_array_oop(o);
 235         push_depth(masked_o);
 236         TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
 237       } else {
 238         // we'll just push its contents
 239         push_contents(new_obj);
 240       }
 241     }  else {
 242       // We lost, someone else "owns" this object
 243       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 244 
 245       // Try to deallocate the space.  If it was directly allocated we cannot
 246       // deallocate it, so we have to test.  If the deallocation fails,
 247       // overwrite with a filler object.
 248       if (new_obj_is_tenured) {
 249         if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 250           CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 251         }
 252       } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 253         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 254       }
 255 
 256       // don't update this before the unallocation!
 257       new_obj = o->forwardee();
 258     }
 259   } else {
 260     assert(o->is_forwarded(), "Sanity");
 261     new_obj = o->forwardee();
 262   }
 263 
 264   // This code must come after the CAS test, or it will print incorrect
 265   // information.
 266   log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 267                                   should_scavenge(&new_obj) ? "copying" : "tenuring",
 268                                   new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
 269 
 270   return new_obj;
 271 }
 272 
 273 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 274 // This version tests the oop* to make sure it is within the heap before
 275 // attempting marking.
 276 template <class T, bool promote_immediately>
 277 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
 278   assert(should_scavenge(p, true), "revisiting object?");
 279 
 280   oop o = oopDesc::load_decode_heap_oop_not_null(p);
 281   oop new_obj = o->is_forwarded()
 282         ? o->forwardee()
 283         : copy_to_survivor_space<promote_immediately>(o);
 284 
 285   // This code must come after the CAS test, or it will print incorrect
 286   // information.
 287   if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
 288     log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 289                       "forwarding",
 290                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
 291   }
 292 
 293   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 294 
 295   // We cannot mark without test, as some code passes us pointers
 296   // that are outside the heap. These pointers are either from roots
 297   // or from metadata.
 298   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
 299       ParallelScavengeHeap::heap()->is_in_reserved(p)) {
 300     if (PSScavenge::is_obj_in_young(new_obj)) {
 301       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
 302     }
 303   }
 304 }
 305 
 306 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
 307   if (is_oop_masked(p)) {
 308     assert(PSChunkLargeArrays, "invariant");
 309     oop const old = unmask_chunked_array_oop(p);
 310     process_array_chunk(old);

--- end of the old version of the excerpt; the patched (new) version of the same lines follows ---

 194 
 195         // This is the promotion failed test, and code handling.
 196         // The code belongs here for two reasons. It is slightly
 197         // different than the code below, and cannot share the
 198         // CAS testing code. Keeping the code here also minimizes
 199         // the impact on the common case fast path code.
 200 
 201         if (new_obj == NULL) {
 202           _old_gen_is_full = true;
 203           return oop_promotion_failed(o, test_mark);
 204         }
 205       }
 206     }
 207 
 208     assert(new_obj != NULL, "allocation should have succeeded");
 209 
 210     // Copy obj
 211     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
 212 
 213     // Now we have to CAS in the header.
 214     if (o->cas_forward_to(new_obj, test_mark, memory_order_relaxed)) {
 215       // We won any races, we "own" this object.
 216       assert(new_obj == o->forwardee(), "Sanity");
 217 
 218       // Increment age if obj still in new generation. Now that
 219       // we're dealing with a markOop that cannot change, it is
 220       // okay to use the non mt safe oop methods.
 221       if (!new_obj_is_tenured) {
 222         new_obj->incr_age();
 223         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
 224       }
 225 
 226       // Do the size comparison first with new_obj_size, which we
 227       // already have. Hopefully, only a few objects are larger than
 228       // _min_array_size_for_chunking, and most of them will be arrays.
 229         // So, the is_objArray() test would be very infrequent.
 230       if (new_obj_size > _min_array_size_for_chunking &&
 231           new_obj->is_objArray() &&
 232           PSChunkLargeArrays) {
 233         // we'll chunk it
 234         oop* const masked_o = mask_chunked_array_oop(o);
 235         push_depth(masked_o);
 236         TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
 237       } else {
 238         // we'll just push its contents
 239         push_contents(new_obj);
 240       }
 241 
 242       // This code must come after the CAS test, or it will print incorrect
 243       // information.
 244       log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 245                                       should_scavenge(&new_obj) ? "copying" : "tenuring",
 246                                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj),
 247                                       new_obj->size());
 248     }  else {
 249       // We lost, someone else "owns" this object
 250       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 251 
 252       // Try to deallocate the space.  If it was directly allocated we cannot
 253       // deallocate it, so we have to test.  If the deallocation fails,
 254       // overwrite with a filler object.
 255       if (new_obj_is_tenured) {
 256         if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 257           CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 258         }
 259       } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
 260         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
 261       }
 262 
 263       // don't update this before the unallocation!
 264       new_obj = o->forwardee();
 265 
 266       // fields in new_obj may not be synchronized.
 267       log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 268                                       should_scavenge(&new_obj) ? "copying" : "tenuring",
 269                                       o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 270     }
 271   } else {
 272     assert(o->is_forwarded(), "Sanity");
 273     new_obj = o->forwardee();
 274     // fields in new_obj may not be synchronized.
 275     log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 276                                     should_scavenge(&new_obj) ? "copying" : "tenuring",
 277                                     o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 278   }
 279 
 280   return new_obj;
 281 }
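
The functional change in copy_to_survivor_space above is the memory_order_relaxed argument to cas_forward_to at line 214, plus the per-branch trace logging. A rough, self-contained sketch of that forwarding-CAS idiom using std::atomic rather than HotSpot's markOop/Atomic types; the names and the forwarding-tag value below are assumptions for illustration only:

#include <atomic>
#include <cstdint>

// Sketch only: models the mark-word CAS that cas_forward_to performs.
// The "forwarded" encoding (low bits 0x3) and all names are assumptions.
struct ObjectHeader {
  std::atomic<uintptr_t> mark;
};

const uintptr_t FORWARDED_BITS = 0x3;   // assumed forwarding tag

inline bool cas_forward_to(ObjectHeader* obj, void* new_location,
                           uintptr_t test_mark) {
  uintptr_t forwarded = reinterpret_cast<uintptr_t>(new_location) | FORWARDED_BITS;
  uintptr_t expected  = test_mark;
  // Relaxed CAS: winning the race only claims the object; losers observe
  // is_forwarded() and fetch the winner's copy via forwardee().
  return obj->mark.compare_exchange_strong(expected, forwarded,
                                           std::memory_order_relaxed,
                                           std::memory_order_relaxed);
}

The relaxed order only decides which thread claims the object; the sketch assumes, as the patch presumably does, that visibility of the copied payload is established by other synchronization in the GC.
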
 282 
 283 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 284 // This version tests the oop* to make sure it is within the heap before
 285 // attempting marking.
 286 template <class T, bool promote_immediately>
 287 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
 288   assert(should_scavenge(p, true), "revisiting object?");
 289 
 290   oop o = oopDesc::load_decode_heap_oop_not_null(p);
 291   oop new_obj;
 292 
 293   if (o->is_forwarded()) {
 294     new_obj = o->forwardee();
 295     // fields in new_obj may not be synchronized.
 296     if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
 297       log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT "}",
 298                         "forwarding",
 299                         o->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj));
 300     }
 301   } else {
 302     new_obj = copy_to_survivor_space<promote_immediately>(o);
 303     // This code must come after the CAS test, or it will print incorrect
 304     // information.
 305     if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
 306       log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
 307                         "forwarding",
 308                         new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj),
 309                         new_obj->size());
 310     }
 311   }
 312 
 313   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 314 
 315   // We cannot mark without test, as some code passes us pointers
 316   // that are outside the heap. These pointers are either from roots
 317   // or from metadata.
 318   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
 319       ParallelScavengeHeap::heap()->is_in_reserved(p)) {
 320     if (PSScavenge::is_obj_in_young(new_obj)) {
 321       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
 322     }
 323   }
 324 }
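
The block just before the closing brace of copy_and_push_safe_barrier dirties a card whenever a slot outside the young generation (but inside the reserved heap) ends up referring to a young-generation object, so the next scavenge can find that old-to-young reference without scanning all of old space. A minimal sketch of byte-per-card marking, assuming typical defaults (512-byte cards, 0 = dirty); this is illustrative and not CardTableExtension's actual interface:

#include <cstdint>
#include <cstring>

// Sketch only: byte-per-card marking with assumed defaults.
class MiniCardTable {
  enum { card_shift = 9 };           // 2^9 = 512-byte cards (assumed)
  uint8_t*  _bytes;                  // one byte per card
  uintptr_t _heap_start;

public:
  MiniCardTable(uintptr_t heap_start, uintptr_t heap_bytes)
      : _bytes(new uint8_t[(heap_bytes >> card_shift) + 1]),
        _heap_start(heap_start) {
    std::memset(_bytes, 0xFF, (heap_bytes >> card_shift) + 1);  // all clean
  }
  ~MiniCardTable() { delete[] _bytes; }

  // Called when a slot p in old space now refers to a young-gen object:
  // dirty the card covering p so the next scavenge rescans that card.
  void write_ref_field(void* p) {
    uintptr_t index = (reinterpret_cast<uintptr_t>(p) - _heap_start) >> card_shift;
    _bytes[index] = 0;  // dirty
  }
};
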
 325 
 326 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
 327   if (is_oop_masked(p)) {
 328     assert(PSChunkLargeArrays, "invariant");
 329     oop const old = unmask_chunked_array_oop(p);
 330     process_array_chunk(old);
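
Both excerpts cut off inside process_popped_location_depth, the consumer of the masked oops pushed by the chunking branch in copy_to_survivor_space. A minimal sketch of the tag-bit idiom behind mask_chunked_array_oop / unmask_chunked_array_oop / is_oop_masked, using a stand-in type and an assumed tag value rather than the actual declarations from psPromotionManager.hpp:

#include <cstdint>

// Sketch only: the tag-bit idiom behind the masked pushes. The tag value
// and the stand-in type are assumptions, not HotSpot's declarations.
typedef void* task_slot;                       // stand-in for oop* / StarTask
const uintptr_t CHUNKED_ARRAY_TAG = 0x2;       // assumed low tag bit

inline task_slot mask_chunked_array(task_slot p) {
  return reinterpret_cast<task_slot>(reinterpret_cast<uintptr_t>(p) | CHUNKED_ARRAY_TAG);
}
inline bool is_task_masked(task_slot p) {
  return (reinterpret_cast<uintptr_t>(p) & CHUNKED_ARRAY_TAG) != 0;
}
inline task_slot unmask_chunked_array(task_slot p) {
  return reinterpret_cast<task_slot>(reinterpret_cast<uintptr_t>(p) & ~CHUNKED_ARRAY_TAG);
}

Tagging the pointer lets a partially processed large objArray travel through the same task queue as ordinary oop* entries; the pop side strips the tag and resumes the array at its next chunk.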