{
  const RegionData* cur_cp = region(region_idx);
  const RegionData* const end_cp = region(region_count() - 1);

  HeapWord* result = region_to_addr(region_idx);
  if (cur_cp < end_cp) {
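    // Sum the partial object's contribution over each successive region it
    // completely fills; stop at the first region it only partly covers.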
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
  }
  return result;
}
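
// Record a live object of 'len' words starting at 'addr' in the per-region
// summary data. An object contained in a single region is added to that
// region's live size; a larger object contributes its leading words to the
// first region, marks every fully spanned middle region with a partial object
// of RegionSize words, and leaves its tail to the last region.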
void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_region = obj_ofs >> Log2RegionSize;
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;

  DEBUG_ONLY(Atomic::inc(&add_obj_count);)
  DEBUG_ONLY(Atomic::add(&add_obj_size, len);)

  if (beg_region == end_region) {
    // All in one region.
    _region_data[beg_region].add_live_obj(len);
    return;
  }

  // First region.
  const size_t beg_ofs = region_offset(addr);
  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

  // Middle regions--completely spanned by this object.
  for (size_t region = beg_region + 1; region < end_region; ++region) {
    _region_data[region].set_partial_obj_size(RegionSize);
    _region_data[region].set_partial_obj_addr(addr);
  }

  // Last region.
  const size_t end_ofs = region_offset(addr + len - 1);
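  // Worked example (illustrative only; assume RegionSize = 1024 words): an
  // object of len = 3000 words starting at offset 512 in its first region
  // spans regions 0..3. Region 0 gets 1024 - 512 = 512 live words, regions 1
  // and 2 are fully covered and record a partial object of 1024 words each,
  // and end_ofs works out to 439, so the remaining 440 words belong to the
  // last region: 512 + 1024 + 1024 + 440 = 3000.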

// ...

class TaskQueue : StackObj {
  volatile uint _counter;
  uint _size;
  uint _insert_index;
  PSParallelCompact::UpdateDensePrefixTask* _backing_array;
public:
  explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(NULL) {
    _backing_array = NEW_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _size, mtGC);
  }
  ~TaskQueue() {
    assert(_counter >= _insert_index, "not all queue elements were claimed");
    FREE_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _backing_array);
  }

  void push(const PSParallelCompact::UpdateDensePrefixTask& value) {
    assert(_insert_index < _size, "too small backing array");
    _backing_array[_insert_index++] = value;
  }
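
  // Atomically claim the next pushed task. Returns false once every pushed
  // element has been handed out, so concurrent workers can loop on this call.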
  bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
    if (claimed < _insert_index) {
      reference = _backing_array[claimed];
      return true;
    } else {
      return false;
    }
  }
};
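
// Minimal usage sketch (illustrative only, not part of this file): the queue
// is filled single-threaded with push() and then drained concurrently by the
// GC worker threads with try_claim(). The UpdateDensePrefixTask constructor
// arguments shown below are assumed for illustration.
//
//   TaskQueue task_queue(task_count);
//   for (uint i = 0; i < task_count; ++i) {
//     task_queue.push(PSParallelCompact::UpdateDensePrefixTask(space_id, beg_region, end_region));
//   }
//   // In each GC worker:
//   PSParallelCompact::UpdateDensePrefixTask task;
//   while (task_queue.try_claim(task)) {
//     // ... update the dense prefix regions described by 'task' ...
//   }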

#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
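// Over-partitioning factor: the dense prefix work is split into more chunks
// than there are GC threads (roughly parallel_gc_threads * 4), so a worker
// that finishes its chunk early can claim another one from the task queue.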

void PSParallelCompact::enqueue_dense_prefix_tasks(TaskQueue& task_queue,
                                                   uint parallel_gc_threads) {
  GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);

  ParallelCompactData& sd = PSParallelCompact::summary_data();

  // Iterate over all the spaces adding tasks for updating
  // regions in the dense prefix. Assume that 1 gc thread
  // will work on opening the gaps and the remaining gc threads
  // will work on the dense prefix.