
src/hotspot/share/gc/cms/parNewGeneration.cpp

Old version:

1228 // as we were made painfully aware not long ago, see 6786503).
1229 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
1230 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
1231   assert(is_in_reserved(from_space_obj), "Should be from this generation");
1232   if (ParGCUseLocalOverflow) {
1233     // In the case of compressed oops, we use a private, not-shared
1234     // overflow stack.
1235     par_scan_state->push_on_overflow_stack(from_space_obj);
1236   } else {
1237     assert(!UseCompressedOops, "Error");
1238     // if the object has been forwarded to itself, then we cannot
1239     // use the klass pointer for the linked list.  Instead we have
1240     // to allocate an oopDesc in the C-Heap and use that for the linked list.
1241     // XXX This is horribly inefficient when a promotion failure occurs
1242     // and should be fixed. XXX FIX ME !!!
1243 #ifndef PRODUCT
1244     Atomic::inc(&_num_par_pushes);
1245     assert(_num_par_pushes > 0, "Tautology");
1246 #endif
1247     if (from_space_obj->forwardee() == from_space_obj) {
1248       oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
1249       listhead->forward_to(from_space_obj);
1250       from_space_obj = listhead;
1251     }
1252     oop observed_overflow_list = _overflow_list;
1253     oop cur_overflow_list;
1254     do {
1255       cur_overflow_list = observed_overflow_list;
1256       if (cur_overflow_list != BUSY) {
1257         from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
1258       } else {
1259         from_space_obj->set_klass_to_list_ptr(NULL);
1260       }
1261       observed_overflow_list =
1262         Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
1263     } while (cur_overflow_list != observed_overflow_list);
1264   }
1265 }
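
The loop above is a lock-free push onto a global singly linked list (a Treiber-stack pattern), with the twist that the comment at the top of the hunk alludes to (6786503): a taker may temporarily install the BUSY sentinel as the list head while it detaches the whole chain, so a pusher must never link its node to BUSY. A minimal stand-alone sketch of the same protocol, using std::atomic in place of HotSpot's Atomic class and a plain next field in place of the reused klass word:

#include <atomic>
#include <cstdint>

struct Node {
  Node* next;  // stands in for the klass word reused as a list pointer
};

// Sentinel a taker installs while it owns the detached chain (cf. BUSY above).
static Node* const BUSY_NODE =
    reinterpret_cast<Node*>(std::uintptr_t{0x1aff1aff});

static std::atomic<Node*> overflow_list{nullptr};

void push_on_overflow_list(Node* n) {
  Node* observed = overflow_list.load();
  do {
    // Never link to the sentinel: a BUSY head is treated as an empty list.
    n->next = (observed == BUSY_NODE) ? nullptr : observed;
    // On failure, compare_exchange_weak refreshes 'observed' with the
    // current head, and the loop recomputes the next pointer and retries.
  } while (!overflow_list.compare_exchange_weak(observed, n));
}

Note that the push deliberately succeeds even while a taker holds BUSY: the new node simply starts a fresh list, and the taker discovers the changed head when it tries to CAS its leftover suffix back.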
1266 
1267 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
1268   bool res;

1385         observed_overflow_list =
1386           Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
1387       } while (cur_overflow_list != observed_overflow_list);
1388     }
1389   }
1390 
1391   // Push objects on prefix list onto this thread's work queue
1392   assert(prefix != NULL && prefix != BUSY, "program logic");
1393   cur = prefix;
1394   ssize_t n = 0;
1395   while (cur != NULL) {
1396     oop obj_to_push = cur->forwardee();
1397     oop next        = cur->list_ptr_from_klass();
1398     cur->set_klass(obj_to_push->klass());
1399     // This may be an array object that is self-forwarded. In that case, the list pointer
1400     // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
1401     if (!is_in_reserved(cur)) {
1402       // This can become a scaling bottleneck when there is work queue overflow coincident
1403       // with promotion failure.
1404       oopDesc* f = cur;
1405       FREE_C_HEAP_ARRAY(oopDesc, f);
1406     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
1407       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
1408       obj_to_push = cur;
1409     }
1410     bool ok = work_q->push(obj_to_push);
1411     assert(ok, "Should have succeeded");
1412     cur = next;
1413     n++;
1414   }
1415   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1416 #ifndef PRODUCT
1417   assert(_num_par_pushes >= n, "Too many pops?");
1418   Atomic::sub(n, &_num_par_pushes);
1419 #endif
1420   return true;
1421 }
1422 #undef BUSY
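
Only the head (1267-1268) and the tail (1385 onward) of take_from_overflow_list appear in this hunk: the CAS loop that publishes the leftover suffix back, and the walk that moves the claimed prefix onto this thread's work queue. Continuing the stand-alone sketch from the push side, the overall take protocol looks roughly like this; it is a simplified model, not the HotSpot code (in particular, the real version splices the whole suffix back in one piece, as in the loop above, instead of re-pushing node by node):

#include <cstddef>

// Continues the Node / BUSY_NODE / overflow_list sketch above. Detach the
// whole chain, keep up to max_take nodes, and publish the remainder back.
Node* take_prefix(std::size_t max_take) {
  // Claim the entire list: other takers see BUSY_NODE and back off, while
  // pushers treat it as an empty list.
  Node* prefix = overflow_list.exchange(BUSY_NODE);
  if (prefix == nullptr || prefix == BUSY_NODE) {
    if (prefix == nullptr) {
      // We replaced an empty list with the sentinel; undo that if the
      // head is still ours.
      Node* expected = BUSY_NODE;
      overflow_list.compare_exchange_strong(expected, nullptr);
    }
    return nullptr;  // nothing to take, or another taker owns the chain
  }
  // Walk at most max_take nodes; cur ends on the last node we keep.
  Node* cur = prefix;
  for (std::size_t i = 1; i < max_take && cur->next != nullptr; i++) {
    cur = cur->next;
  }
  Node* suffix = cur->next;
  cur->next = nullptr;  // cut the prefix off the chain
  if (suffix == nullptr) {
    Node* expected = BUSY_NODE;
    overflow_list.compare_exchange_strong(expected, nullptr);
  } else {
    // Fast path: if the head is still our sentinel, drop the suffix back.
    Node* expected = BUSY_NODE;
    if (!overflow_list.compare_exchange_strong(expected, suffix)) {
      // A pusher slipped in meanwhile; re-push the suffix nodes one at a
      // time (the real code splices the suffix back as shown above).
      while (suffix != nullptr) {
        Node* next = suffix->next;
        push_on_overflow_list(suffix);
        suffix = next;
      }
    }
  }
  return prefix;
}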
1423 
1424 void ParNewGeneration::ref_processor_init() {
1425   if (_ref_processor == NULL) {

New version:
1228 // as we were made painfully aware not long ago, see 6786503).
1229 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
1230 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
1231   assert(is_in_reserved(from_space_obj), "Should be from this generation");
1232   if (ParGCUseLocalOverflow) {
1233     // In the case of compressed oops, we use a private, not-shared
1234     // overflow stack.
1235     par_scan_state->push_on_overflow_stack(from_space_obj);
1236   } else {
1237     assert(!UseCompressedOops, "Error");
1238     // if the object has been forwarded to itself, then we cannot
1239     // use the klass pointer for the linked list.  Instead we have
1240     // to allocate an oopDesc in the C-Heap and use that for the linked list.
1241     // XXX This is horribly inefficient when a promotion failure occurs
1242     // and should be fixed. XXX FIX ME !!!
1243 #ifndef PRODUCT
1244     Atomic::inc(&_num_par_pushes);
1245     assert(_num_par_pushes > 0, "Tautology");
1246 #endif
1247     if (from_space_obj->forwardee() == from_space_obj) {
1248       oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC);
1249       listhead->forward_to(from_space_obj);
1250       from_space_obj = listhead;
1251     }
1252     oop observed_overflow_list = _overflow_list;
1253     oop cur_overflow_list;
1254     do {
1255       cur_overflow_list = observed_overflow_list;
1256       if (cur_overflow_list != BUSY) {
1257         from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
1258       } else {
1259         from_space_obj->set_klass_to_list_ptr(NULL);
1260       }
1261       observed_overflow_list =
1262         Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
1263     } while (cur_overflow_list != observed_overflow_list);
1264   }
1265 }
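
The only change in this version of push_on_overflow_list is at line 1248: the single dummy oopDesc is now allocated with NEW_C_HEAP_OBJ instead of a one-element NEW_C_HEAP_ARRAY. If memory serves, the macros in HotSpot's allocation.hpp of this era relate roughly as follows (a paraphrase, not the exact definitions):

// Paraphrased from hotspot's allocation.hpp; treat as an approximation.
#define NEW_C_HEAP_ARRAY(type, size, memflags) \
  (type*) AllocateHeap((size) * sizeof(type), memflags)

#define NEW_C_HEAP_OBJ(type, memflags) \
  NEW_C_HEAP_ARRAY(type, 1, memflags)     // a one-element "array"

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

#define FREE_C_HEAP_OBJ(objname) \
  FREE_C_HEAP_ARRAY(char, objname)

On that reading the change is behavior-preserving: it expands to the same allocation, but states the intent directly (one object, not an array) and pairs naturally with the FREE_C_HEAP_OBJ now used on the pop side.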
1266 
1267 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
1268   bool res;

1385         observed_overflow_list =
1386           Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
1387       } while (cur_overflow_list != observed_overflow_list);
1388     }
1389   }
1390 
1391   // Push objects on prefix list onto this thread's work queue
1392   assert(prefix != NULL && prefix != BUSY, "program logic");
1393   cur = prefix;
1394   ssize_t n = 0;
1395   while (cur != NULL) {
1396     oop obj_to_push = cur->forwardee();
1397     oop next        = cur->list_ptr_from_klass();
1398     cur->set_klass(obj_to_push->klass());
1399     // This may be an array object that is self-forwarded. In that case, the list pointer
1400     // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
1401     if (!is_in_reserved(cur)) {
1402       // This can become a scaling bottleneck when there is work queue overflow coincident
1403       // with promotion failure.
1404       oopDesc* f = cur;
1405       FREE_C_HEAP_OBJ(f);
1406     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
1407       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
1408       obj_to_push = cur;
1409     }
1410     bool ok = work_q->push(obj_to_push);
1411     assert(ok, "Should have succeeded");
1412     cur = next;
1413     n++;
1414   }
1415   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1416 #ifndef PRODUCT
1417   assert(_num_par_pushes >= n, "Too many pops?");
1418   Atomic::sub(n, &_num_par_pushes);
1419 #endif
1420   return true;
1421 }
1422 #undef BUSY
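
The matching change is at line 1405: the dummy list head is released with FREE_C_HEAP_OBJ once the pop side detects, via !is_in_reserved(cur), that the node lives in the C heap rather than in the Java heap. A compact model of why that dummy node exists at all (hypothetical field layout; in HotSpot the forwarding pointer lives in the mark word and the list pointer overlays the klass word):

// Continues the sketch: why self-forwarded objects need a C-heap stand-in.
struct FwdNode {
  FwdNode* next;       // header word reused as the list pointer
  FwdNode* forwardee;  // forwarding pointer
};

FwdNode* node_for_push(FwdNode* obj) {
  if (obj->forwardee == obj) {
    // Promotion failed: the from-space copy IS the live object, so its
    // header must not be clobbered. Allocate a disposable stand-in
    // (NEW_C_HEAP_OBJ in the code above) whose header is free to reuse.
    return new FwdNode{nullptr, obj};
  }
  // Normally forwarded: the from-space copy is dead, so reuse its header.
  return obj;
}

On the pop side, the stand-in is recognized by its address, which falls outside the reserved Java heap, and is freed; its forwardee still leads to the real object that gets pushed onto the work queue.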
1423 
1424 void ParNewGeneration::ref_processor_init() {
1425   if (_ref_processor == NULL) {