< prev index next >

src/share/vm/gc/parallel/objectStartArray.cpp

Print this page




  26 #include "gc/parallel/objectStartArray.inline.hpp"
  27 #include "gc/shared/cardTableModRefBS.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 void ObjectStartArray::initialize(MemRegion reserved_region) {
     // One-time setup: reserve (but do not yet commit) the backing byte
     // array that records object starts, one byte per heap block.
     // NOTE(review): this is a webrev listing; original lines 67-71 are
     // elided below and are deliberately left untouched here.
  34   // We're based on the assumption that we use the same
  35   // size blocks as the card table.
  36   assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  37   assert((int)block_size <= 512, "block_size must be less than or equal to 512");
  38 
  39   // Calculate how much space must be reserved
  40   _reserved_region = reserved_region;
  41 
     // One byte of start-array state per block: heap words / words-per-block.
  42   size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  43   assert(bytes_to_reserve > 0, "Sanity");
  44 
     // Round the reservation up to the OS allocation granularity.
  45   bytes_to_reserve =
  46     align_size_up(bytes_to_reserve, os::vm_allocation_granularity());
  47 
  48   // Do not use large-pages for the backing store. The one large page region
  49   // will be used for the heap proper.
  50   ReservedSpace backing_store(bytes_to_reserve);
  51   if (!backing_store.is_reserved()) {
  52     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  53   }
     // Tag the reservation as GC memory for native-memory tracking.
  54   MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
  55 
  56   // We do not commit any memory initially; set_covered_region() commits
     // pages on demand as the covered region grows.
  57   if (!_virtual_space.initialize(backing_store, 0)) {
  58     vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  59   }
  60 
  61   _raw_base = (jbyte*)_virtual_space.low_boundary();
  62 
  63   if (_raw_base == NULL) {
  64     vm_exit_during_initialization("Could not get raw_base address");
  65   }
  66 

     // [original lines 67-71 elided by the webrev view]

     // Start both regions empty; set_covered_region() grows them later.
  72   _covered_region.set_start(reserved_region.start());
  73   _covered_region.set_word_size(0);
  74 
  75   _blocks_region.set_start((HeapWord*)_raw_base);
  76   _blocks_region.set_word_size(0);
  77 }
  78 
  79 void ObjectStartArray::set_covered_region(MemRegion mr) {
     // Resize the start array to cover exactly mr, committing or
     // uncommitting backing pages as needed. mr must begin where the
     // reserved region begins and lie entirely within it.
     // NOTE(review): the webrev listing is cut off after original line
     // 112; the tail of this function (presumably including the update of
     // _blocks_region to the new size) is not visible here — confirm
     // against the full source.
  80   assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  81   assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");
  82 
  83   HeapWord* low_bound  = mr.start();
  84   HeapWord* high_bound = mr.end();
  85   assert((uintptr_t(low_bound)  & (block_size - 1))  == 0, "heap must start at block boundary");
  86   assert((uintptr_t(high_bound) & (block_size - 1))  == 0, "heap must end at block boundary");
  87 
     // One byte per block covering mr.
  88   size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;
  89 
  90   // Only commit memory in page sized chunks
  91   requested_blocks_size_in_bytes =
  92     align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());
  93 
  94   _covered_region = mr;
  95 
  96   size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
  97 
  98   if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
  99     // Expand
 100     size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
 101     if (!_virtual_space.expand_by(expand_by)) {
 102       vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
 103     }
 104     // Clear *only* the newly allocated region
     // (_blocks_region still holds the old size at this point, so end()
     // is the first newly committed byte).
 105     memset(_blocks_region.end(), clean_block, expand_by);
 106   }
 107 
 108   if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
 109     // Shrink: uncommit the now-unneeded tail of the backing store.
 110     size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
 111     _virtual_space.shrink_by(shrink_by);
 112   }




  26 #include "gc/parallel/objectStartArray.inline.hpp"
  27 #include "gc/shared/cardTableModRefBS.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 void ObjectStartArray::initialize(MemRegion reserved_region) {
     // One-time setup: reserve (but do not yet commit) the backing byte
     // array that records object starts, one byte per heap block.
     // This is the NEW side of the webrev: the only change versus the old
     // side is align_size_up() -> align_up() (renamed alignment helper).
     // NOTE(review): original lines 67-71 are elided by the webrev view
     // below and are deliberately left untouched here.
  34   // We're based on the assumption that we use the same
  35   // size blocks as the card table.
  36   assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  37   assert((int)block_size <= 512, "block_size must be less than or equal to 512");
  38 
  39   // Calculate how much space must be reserved
  40   _reserved_region = reserved_region;
  41 
     // One byte of start-array state per block: heap words / words-per-block.
  42   size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  43   assert(bytes_to_reserve > 0, "Sanity");
  44 
     // Round the reservation up to the OS allocation granularity.
  45   bytes_to_reserve =
  46     align_up(bytes_to_reserve, os::vm_allocation_granularity());
  47 
  48   // Do not use large-pages for the backing store. The one large page region
  49   // will be used for the heap proper.
  50   ReservedSpace backing_store(bytes_to_reserve);
  51   if (!backing_store.is_reserved()) {
  52     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  53   }
     // Tag the reservation as GC memory for native-memory tracking.
  54   MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
  55 
  56   // We do not commit any memory initially; set_covered_region() commits
     // pages on demand as the covered region grows.
  57   if (!_virtual_space.initialize(backing_store, 0)) {
  58     vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  59   }
  60 
  61   _raw_base = (jbyte*)_virtual_space.low_boundary();
  62 
  63   if (_raw_base == NULL) {
  64     vm_exit_during_initialization("Could not get raw_base address");
  65   }
  66 

     // [original lines 67-71 elided by the webrev view]

     // Start both regions empty; set_covered_region() grows them later.
  72   _covered_region.set_start(reserved_region.start());
  73   _covered_region.set_word_size(0);
  74 
  75   _blocks_region.set_start((HeapWord*)_raw_base);
  76   _blocks_region.set_word_size(0);
  77 }
  78 
  79 void ObjectStartArray::set_covered_region(MemRegion mr) {
     // Resize the start array to cover exactly mr, committing or
     // uncommitting backing pages as needed. mr must begin where the
     // reserved region begins and lie entirely within it.
     // This is the NEW side of the webrev: the only change versus the old
     // side is align_size_up() -> align_up() (renamed alignment helper).
     // NOTE(review): the webrev listing is cut off after original line
     // 112; the tail of this function (presumably including the update of
     // _blocks_region to the new size) is not visible here — confirm
     // against the full source.
  80   assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  81   assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");
  82 
  83   HeapWord* low_bound  = mr.start();
  84   HeapWord* high_bound = mr.end();
  85   assert((uintptr_t(low_bound)  & (block_size - 1))  == 0, "heap must start at block boundary");
  86   assert((uintptr_t(high_bound) & (block_size - 1))  == 0, "heap must end at block boundary");
  87 
     // One byte per block covering mr.
  88   size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;
  89 
  90   // Only commit memory in page sized chunks
  91   requested_blocks_size_in_bytes =
  92     align_up(requested_blocks_size_in_bytes, os::vm_page_size());
  93 
  94   _covered_region = mr;
  95 
  96   size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
  97 
  98   if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
  99     // Expand
 100     size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
 101     if (!_virtual_space.expand_by(expand_by)) {
 102       vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
 103     }
 104     // Clear *only* the newly allocated region
     // (_blocks_region still holds the old size at this point, so end()
     // is the first newly committed byte).
 105     memset(_blocks_region.end(), clean_block, expand_by);
 106   }
 107 
 108   if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
 109     // Shrink: uncommit the now-unneeded tail of the backing store.
 110     size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
 111     _virtual_space.shrink_by(shrink_by);
 112   }


< prev index next >