1619 }
1620 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1621 #ifndef PRODUCT
1622 assert(_num_par_pushes >= n, "Too many pops?");
1623 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
1624 #endif
1625 return true;
1626 }
1627 #undef BUSY
1628
// Lazily creates this generation's ReferenceProcessor on first use;
// once _ref_processor is non-NULL the call is a no-op.
// MT processing/discovery are enabled based on ParallelRefProcEnabled
// and the configured ParallelGCThreads count.
1629 void ParNewGeneration::ref_processor_init() {
1630 if (_ref_processor == NULL) {
1631 // Allocate and initialize a reference processor
1632 _ref_processor =
1633 new ReferenceProcessor(_reserved, // span
1634 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1635 (int) ParallelGCThreads, // mt processing degree
1636 refs_discovery_is_mt(), // mt discovery
1637 (int) ParallelGCThreads, // mt discovery degree
1638 refs_discovery_is_atomic(), // atomic_discovery
1639 NULL, // is_alive_non_header
1640 false); // write barrier for next field updates
1641 }
1642 }
// NOTE(review): this copy of the listing passes a trailing boolean
// ("write barrier for next field updates") that the other copy in this
// file omits — presumably two revisions of the same code; confirm which
// ReferenceProcessor constructor signature the build actually declares.
1643
// Returns the fixed human-readable name of this generation,
// as used in GC logging/printing.
1644 const char* ParNewGeneration::name() const {
1645 return "par new generation";
1646 }
|
1619 }
1620 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1621 #ifndef PRODUCT
1622 assert(_num_par_pushes >= n, "Too many pops?");
1623 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
1624 #endif
1625 return true;
1626 }
1627 #undef BUSY
1628
// Lazily creates this generation's ReferenceProcessor on first use;
// once _ref_processor is non-NULL the call is a no-op.
// MT processing/discovery are enabled based on ParallelRefProcEnabled
// and the configured ParallelGCThreads count.
1629 void ParNewGeneration::ref_processor_init() {
1630 if (_ref_processor == NULL) {
1631 // Allocate and initialize a reference processor
1632 _ref_processor =
1633 new ReferenceProcessor(_reserved, // span
1634 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1635 (int) ParallelGCThreads, // mt processing degree
1636 refs_discovery_is_mt(), // mt discovery
1637 (int) ParallelGCThreads, // mt discovery degree
1638 refs_discovery_is_atomic(), // atomic_discovery
1639 NULL); // is_alive_non_header
1640 }
1641 }
// NOTE(review): this copy of the listing uses the 7-argument
// ReferenceProcessor constructor (no trailing write-barrier flag),
// unlike the other copy in this file — presumably a later revision;
// confirm against the ReferenceProcessor declaration in the build.
1642
// Returns the fixed human-readable name of this generation,
// as used in GC logging/printing.
1643 const char* ParNewGeneration::name() const {
1644 return "par new generation";
1645 }
|