src/hotspot/share/gc/cms/parNewGeneration.cpp

--- old/src/hotspot/share/gc/cms/parNewGeneration.cpp
+++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp
@@ -1228,41 +1228,41 @@
 // as we were made painfully aware not long ago, see 6786503).
 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   assert(is_in_reserved(from_space_obj), "Should be from this generation");
   if (ParGCUseLocalOverflow) {
     // In the case of compressed oops, we use a private, not-shared
     // overflow stack.
     par_scan_state->push_on_overflow_stack(from_space_obj);
   } else {
     assert(!UseCompressedOops, "Error");
     // if the object has been forwarded to itself, then we cannot
     // use the klass pointer for the linked list.  Instead we have
     // to allocate an oopDesc in the C-Heap and use that for the linked list.
     // XXX This is horribly inefficient when a promotion failure occurs
     // and should be fixed. XXX FIX ME !!!
 #ifndef PRODUCT
     Atomic::inc(&_num_par_pushes);
     assert(_num_par_pushes > 0, "Tautology");
 #endif
     if (from_space_obj->forwardee() == from_space_obj) {
-      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
+      oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC);
       listhead->forward_to(from_space_obj);
       from_space_obj = listhead;
     }
     oop observed_overflow_list = _overflow_list;
     oop cur_overflow_list;
     do {
       cur_overflow_list = observed_overflow_list;
       if (cur_overflow_list != BUSY) {
         from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
       } else {
         from_space_obj->set_klass_to_list_ptr(NULL);
       }
       observed_overflow_list =
         Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
     } while (cur_overflow_list != observed_overflow_list);
   }
 }
 
 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
   bool res;
@@ -1384,41 +1384,41 @@ bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state)
         observed_overflow_list =
           Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
     }
   }
 
   // Push objects on prefix list onto this thread's work queue
   assert(prefix != NULL && prefix != BUSY, "program logic");
   cur = prefix;
   ssize_t n = 0;
   while (cur != NULL) {
     oop obj_to_push = cur->forwardee();
     oop next        = cur->list_ptr_from_klass();
     cur->set_klass(obj_to_push->klass());
     // This may be an array object that is self-forwarded. In that case, the list pointer
     // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
     if (!is_in_reserved(cur)) {
       // This can become a scaling bottleneck when there is work queue overflow coincident
       // with promotion failure.
       oopDesc* f = cur;
-      FREE_C_HEAP_ARRAY(oopDesc, f);
+      FREE_C_HEAP_OBJ(f);
     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
     }
     bool ok = work_q->push(obj_to_push);
     assert(ok, "Should have succeeded");
     cur = next;
     n++;
   }
   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
   Atomic::sub(n, &_num_par_pushes);
 #endif
   return true;
 }
 #undef BUSY
 
 void ParNewGeneration::ref_processor_init() {
   if (_ref_processor == NULL) {
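The change above swaps the length-1 NEW_C_HEAP_ARRAY allocation of the C-heap list head, and its matching FREE_C_HEAP_ARRAY in take_from_overflow_list, for the single-object NEW_C_HEAP_OBJ / FREE_C_HEAP_OBJ pair; the overflow-list logic itself is untouched. That logic is a Treiber-style lock-free stack: the klass word of the pushed object doubles as the list link, and the BUSY sentinel marks a list temporarily claimed by a popping thread. Below is a minimal standalone sketch of the same CAS loop, assuming illustrative stand-ins: Node for oopDesc, next for the reused klass word, and std::atomic's compare_exchange_strong in place of HotSpot's Atomic::cmpxchg.

#include <atomic>
#include <cstdint>

// Stand-in for oopDesc: 'next' plays the role of the klass word that
// push_on_overflow_list() reuses as a list pointer.
struct Node {
  Node* next;
};

// Sentinel a popping thread installs while it claims the list (the role
// BUSY plays above); never dereferenced.
static Node* const BUSY_NODE =
    reinterpret_cast<Node*>(static_cast<std::uintptr_t>(0x1aff1aff));

static std::atomic<Node*> overflow_list{nullptr};

// Link the new node to the observed head and CAS it in, retrying until
// the CAS succeeds. If the head is the BUSY sentinel, link to nullptr so
// the claimed suffix is not captured, mirroring the loop above.
void push_on_overflow_list(Node* n) {
  Node* observed = overflow_list.load();
  for (;;) {
    n->next = (observed != BUSY_NODE) ? observed : nullptr;
    // On failure compare_exchange_strong reloads the current head into
    // 'observed', so the next iteration re-links against it.
    if (overflow_list.compare_exchange_strong(observed, n)) {
      return;
    }
  }
}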

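The refill loop at the end of take_from_overflow_list walks the detached prefix, restores each object's klass word from its forwardee, frees any C-heap list heads, and pushes the objects onto the thread-local work queue. The sketch below, reusing the hypothetical Node from the previous example and a std::deque in place of HotSpot's task queue, mirrors only the traversal-and-count structure; klass restoration, C-heap freeing, and the partially-scanned-array case are omitted.

#include <cstddef>
#include <deque>

// Hand each node of a detached prefix to a thread-local queue and return
// the count, as the loop above does with 'n'.
std::ptrdiff_t refill_from_prefix(Node* prefix, std::deque<Node*>& work_q) {
  std::ptrdiff_t n = 0;
  for (Node* cur = prefix; cur != nullptr;) {
    Node* next = cur->next;  // read the link before handing 'cur' off
    work_q.push_back(cur);
    cur = next;
    n++;
  }
  return n;
}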