    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
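    // A self-forwarded object (promotion failure) cannot have its klass
    // word used as the list link, so allocate a dummy oopDesc in the
    // C-heap to carry the link; it is freed again when the list is
    // drained in take_from_overflow_list_work().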
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
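    // Push onto the lock-free overflow stack: thread the new element
    // through its klass word and CAS it in as the new head, retrying
    // until the CAS succeeds. If the list is momentarily BUSY (claimed
    // by a popping thread), link to NULL instead of the sentinel.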
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
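// The global overflow list is a lock-free stack whose elements are
// linked through their klass words; a thread claims the whole list by
// atomically swapping in the sentinel value BUSY, and competitors that
// observe BUSY briefly spin and retry below.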
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
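  // Walk up to objsFromOverflow-1 links so that cur ends at the tail of
  // the prefix this thread keeps; anything after cur is the suffix that
  // goes back on the global list.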
  oop cur = prefix;
  for (size_t i = 1; i < objsFromOverflow; ++i) {
    oop next = cur->list_ptr_from_klass();
    if (next == NULL) break;
    cur = next;
  }
  assert(cur != NULL, "Loop postcondition");

  // Reattach remaining (suffix) to overflow list
  oop suffix = cur->list_ptr_from_klass();
  if (suffix == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
  } else {
    assert(suffix != BUSY, "Error");
    // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (true) {
        oop next = last->list_ptr_from_klass();
        if (next == NULL) break;
        last = next;
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::sub(n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {