src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

(Capture of two excerpts of this file: the original version first, then a
revised version that records the reserved byte size of the GC data
structures and includes those sizes in the initialization failure
messages.)

 339 
 340   do {
 341     space = space_info[id].space();
 342     print_generic_summary_data(summary_data, space->bottom(), space->top());
 343   } while (++id < PSParallelCompact::last_space_id);
 344 }
 345 #endif  // #ifndef PRODUCT
 346 
 347 #ifdef  ASSERT
 348 size_t add_obj_count;
 349 size_t add_obj_size;
 350 size_t mark_bitmap_count;
 351 size_t mark_bitmap_size;
 352 #endif  // #ifdef ASSERT
 353 
 354 ParallelCompactData::ParallelCompactData()
 355 {
 356   _region_start = 0;
 357 
 358   _region_vspace = 0;

 359   _region_data = 0;
 360   _region_count = 0;
 361 }
 362 
 363 bool ParallelCompactData::initialize(MemRegion covered_region)
 364 {
 365   _region_start = covered_region.start();
 366   const size_t region_size = covered_region.word_size();
 367   DEBUG_ONLY(_region_end = _region_start + region_size;)
 368 
 369   assert(region_align_down(_region_start) == _region_start,
 370          "region start not aligned");
 371   assert((region_size & RegionSizeOffsetMask) == 0,
 372          "region size not a multiple of RegionSize");
 373 
 374   bool result = initialize_region_data(region_size);
 375 
 376   return result;
 377 }
 378 
 379 PSVirtualSpace*
 380 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 381 {
 382   const size_t raw_bytes = count * element_size;
 383   const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
 384   const size_t granularity = os::vm_allocation_granularity();
 385   const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 386 
 387   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 388     MAX2(page_sz, granularity);
 389   ReservedSpace rs(bytes, rs_align, rs_align > 0);
 390   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
 391                        rs.size());
 392 
 393   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 394 
 395   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 396   if (vspace != 0) {
 397     if (vspace->expand_by(bytes)) {
 398       return vspace;
 399     }
 400     delete vspace;
 401     // Release memory reserved in the space.
 402     rs.release();
 403   }
 404 
 405   return 0;
 406 }
 407 
 408 bool ParallelCompactData::initialize_region_data(size_t region_size)
 409 {
 410   const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
 411   _region_vspace = create_vspace(count, sizeof(RegionData));
 412   if (_region_vspace != 0) {
 413     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 414     _region_count = count;
 415     return true;
 416   }
 417   return false;


 824   _counters = new CollectorCounters("PSParallelCompact", 1);
 825 
 826   // Initialize static fields in ParCompactionManager.
 827   ParCompactionManager::initialize(mark_bitmap());
 828 }
 829 
 830 bool PSParallelCompact::initialize() {
 831   ParallelScavengeHeap* heap = gc_heap();
 832   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 833   MemRegion mr = heap->reserved_region();
 834 
 835   // Was the old gen get allocated successfully?
 836   if (!heap->old_gen()->is_allocated()) {
 837     return false;
 838   }
 839 
 840   initialize_space_info();
 841   initialize_dead_wood_limiter();
 842 
 843   if (!_mark_bitmap.initialize(mr)) {
 844     vm_shutdown_during_initialization("Unable to allocate bit map for "
 845       "parallel garbage collection for the requested heap size.");


 846     return false;
 847   }
 848 
 849   if (!_summary_data.initialize(mr)) {
 850     vm_shutdown_during_initialization("Unable to allocate tables for "
 851       "parallel garbage collection for the requested heap size.");


 852     return false;
 853   }
 854 
 855   return true;
 856 }
 857 
 858 void PSParallelCompact::initialize_space_info()
 859 {
 860   memset(&_space_info, 0, sizeof(_space_info));
 861 
 862   ParallelScavengeHeap* heap = gc_heap();
 863   PSYoungGen* young_gen = heap->young_gen();
 864 
 865   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 866   _space_info[eden_space_id].set_space(young_gen->eden_space());
 867   _space_info[from_space_id].set_space(young_gen->from_space());
 868   _space_info[to_space_id].set_space(young_gen->to_space());
 869 
 870   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 871 }




 339 
 340   do {
 341     space = space_info[id].space();
 342     print_generic_summary_data(summary_data, space->bottom(), space->top());
 343   } while (++id < PSParallelCompact::last_space_id);
 344 }
 345 #endif  // #ifndef PRODUCT
 346 
 347 #ifdef  ASSERT
 348 size_t add_obj_count;
 349 size_t add_obj_size;
 350 size_t mark_bitmap_count;
 351 size_t mark_bitmap_size;
 352 #endif  // #ifdef ASSERT
 353 
 354 ParallelCompactData::ParallelCompactData()
 355 {
 356   _region_start = 0;
 357 
 358   _region_vspace = 0;
 359   _reserved_byte_size = 0;
 360   _region_data = 0;
 361   _region_count = 0;
 362 }
 363 
 364 bool ParallelCompactData::initialize(MemRegion covered_region)
 365 {
 366   _region_start = covered_region.start();
 367   const size_t region_size = covered_region.word_size();
 368   DEBUG_ONLY(_region_end = _region_start + region_size;)
 369 
 370   assert(region_align_down(_region_start) == _region_start,
 371          "region start not aligned");
 372   assert((region_size & RegionSizeOffsetMask) == 0,
 373          "region size not a multiple of RegionSize");
 374 
 375   bool result = initialize_region_data(region_size);
 376 
 377   return result;
 378 }
 379 
 380 PSVirtualSpace*
 381 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 382 {
 383   const size_t raw_bytes = count * element_size;
 384   const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
 385   const size_t granularity = os::vm_allocation_granularity();
 386   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 387 
 388   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 389     MAX2(page_sz, granularity);
 390   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
 391   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
 392                        rs.size());
 393 
 394   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 395 
 396   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 397   if (vspace != 0) {
 398     if (vspace->expand_by(_reserved_byte_size)) {
 399       return vspace;
 400     }
 401     delete vspace;
 402     // Release memory reserved in the space.
 403     rs.release();
 404   }
 405 
 406   return 0;
 407 }
 408 
 409 bool ParallelCompactData::initialize_region_data(size_t region_size)
 410 {
 411   const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
 412   _region_vspace = create_vspace(count, sizeof(RegionData));
 413   if (_region_vspace != 0) {
 414     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 415     _region_count = count;
 416     return true;
 417   }
 418   return false;


 825   _counters = new CollectorCounters("PSParallelCompact", 1);
 826 
 827   // Initialize static fields in ParCompactionManager.
 828   ParCompactionManager::initialize(mark_bitmap());
 829 }
 830 
 831 bool PSParallelCompact::initialize() {
 832   ParallelScavengeHeap* heap = gc_heap();
 833   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 834   MemRegion mr = heap->reserved_region();
 835 
 836   // Was the old gen get allocated successfully?
 837   if (!heap->old_gen()->is_allocated()) {
 838     return false;
 839   }
 840 
 841   initialize_space_info();
 842   initialize_dead_wood_limiter();
 843 
 844   if (!_mark_bitmap.initialize(mr)) {
 845     vm_shutdown_during_initialization(
 846       err_msg("Unable to allocate " SIZE_FORMAT " byte bitmap "
 847       "for parallel garbage collection for the requested " SIZE_FORMAT " byte heap size.",
 848       _mark_bitmap.reserved_byte_size(), mr.byte_size()));
 849     return false;
 850   }
 851 
 852   if (!_summary_data.initialize(mr)) {
 853     vm_shutdown_during_initialization(
 854       err_msg("Unable to allocate " SIZE_FORMAT " byte tables "
 855       "for parallel garbage collection for the requested " SIZE_FORMAT " byte heap size.",
 856       _summary_data.reserved_byte_size(), mr.byte_size()));
 857     return false;
 858   }
 859 
 860   return true;
 861 }
 862 
 863 void PSParallelCompact::initialize_space_info()
 864 {
 865   memset(&_space_info, 0, sizeof(_space_info));
 866 
 867   ParallelScavengeHeap* heap = gc_heap();
 868   PSYoungGen* young_gen = heap->young_gen();
 869 
 870   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 871   _space_info[eden_space_id].set_space(young_gen->eden_space());
 872   _space_info[from_space_id].set_space(young_gen->from_space());
 873   _space_info[to_space_id].set_space(young_gen->to_space());
 874 
 875   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 876 }