src/hotspot/share/gc/cms/parNewGeneration.cpp

--- old/src/hotspot/share/gc/cms/parNewGeneration.cpp

// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;
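
For review context, the retry loop above is a standard lock-free list push. BUSY is a non-null sentinel that a draining thread swings the list head to while it claims the whole chain, so a pusher that observes BUSY must start a fresh list (hence set_klass_to_list_ptr(NULL)) rather than link through the sentinel; the cmpxchg publishes the new head only if no other thread changed _overflow_list in the meantime, and otherwise the loop retries against the freshly observed head. Below is a minimal standalone sketch of the same pattern, using std::atomic and an illustrative Node type in place of HotSpot's Atomic::cmpxchg and oopDesc; all names in it are hypothetical, not HotSpot's.

#include <atomic>

struct Node {
  Node* next;  // stand-in for the klass word reused as a list pointer
};

// Non-null sentinel, analogous to BUSY above: it marks the head as
// temporarily claimed by a draining thread and is never dereferenced.
static Node* const kBusy = reinterpret_cast<Node*>(0x1aff1aff);

static std::atomic<Node*> g_overflow_list{nullptr};

void push_on_overflow_list(Node* n) {
  Node* observed = g_overflow_list.load();
  do {
    // Never link through the sentinel; start a fresh list instead,
    // just as the HotSpot loop stores NULL into the klass word.
    n->next = (observed != kBusy) ? observed : nullptr;
    // On failure, compare_exchange_weak reloads the current head into
    // 'observed', so the loop recomputes 'next' and retries; this plays
    // the role of HotSpot's cmpxchg returning the value it actually found.
  } while (!g_overflow_list.compare_exchange_weak(observed, n));
}

The C-heap listhead branch in the real code exists because a promotion-failed object is forwarded to itself, so its header words are already spoken for and cannot double as a list link; a dummy oopDesc whose forwardee points at the real object stands in as the node instead, which is the inefficiency the XXX comment calls out.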

+++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp

// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;
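
The only delta between the two versions is the listhead allocation: NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC) becomes NEW_C_HEAP_OBJ(oopDesc, mtGC). Assuming the allocation macros in share/memory/allocation.hpp of this HotSpot vintage, NEW_C_HEAP_OBJ is a one-element NEW_C_HEAP_ARRAY, so the new form requests the identical single oopDesc-sized block from the C heap and merely states the intent (one object, not an array) at the call site. Roughly, paraphrased from memory rather than copied from the header:

// Paraphrase of the macros in share/memory/allocation.hpp; consult the
// real header for the exact text.
#define NEW_C_HEAP_ARRAY(type, size, memflags) \
  (type*) AllocateHeap((size) * sizeof(type), memflags)

#define NEW_C_HEAP_OBJ(type, memflags) \
  NEW_C_HEAP_ARRAY(type, 1, memflags)

Under that reading the change is behavior-preserving; it only makes the single-object allocation explicit.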