
src/hotspot/share/gc/parallel/mutableSpace.cpp

rev 60637 : 8252221: Use multiple workers for Parallel GC pre-touching
Reviewed-by:
Contributed-by: amith.pawar@gmail.com


  43 MutableSpace::~MutableSpace() {
  44   delete _mangler;
  45 }
  46 
  47 void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  48   if (!mr.is_empty()) {
  49     size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
  50     HeapWord *start = align_up(mr.start(), page_size);
  51     HeapWord *end =   align_down(mr.end(), page_size);
  52     if (end > start) {
  53       size_t size = pointer_delta(end, start, sizeof(char));
  54       if (clear_space) {
  55         // Prefer page reallocation to migration.
  56         os::free_memory((char*)start, size, page_size);
  57       }
  58       os::numa_make_global((char*)start, size);
  59     }
  60   }
  61 }
  62 
  63 void MutableSpace::pretouch_pages(MemRegion mr) {
  64   os::pretouch_memory(mr.start(), mr.end());
  65 }

  66 
  67 void MutableSpace::initialize(MemRegion mr,
  68                               bool clear_space,
  69                               bool mangle_space,
  70                               bool setup_pages) {

  71 
  72   assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
  73          "invalid space boundaries");
  74 
  75   if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
  76     // The space may move left and right or expand/shrink.
  77     // We'd like to enforce the desired page placement.
  78     MemRegion head, tail;
  79     if (last_setup_region().is_empty()) {
  80       // If it's the first initialization don't limit the amount of work.
  81       head = mr;
  82       tail = MemRegion(mr.end(), mr.end());
  83     } else {
  84       // Is there an intersection with the address space?
  85       MemRegion intersection = last_setup_region().intersection(mr);
  86       if (intersection.is_empty()) {
  87         intersection = MemRegion(mr.end(), mr.end());
  88       }
  89       // All the sizes below are in words.
  90       size_t head_size = 0, tail_size = 0;


  97       // Limit the amount of page manipulation if necessary.
  98       if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
  99         const size_t change_size = head_size + tail_size;
 100         const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
 101         head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
 102                          head_size);
 103         tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
 104                          tail_size);
 105       }
 106       head = MemRegion(intersection.start() - head_size, intersection.start());
 107       tail = MemRegion(intersection.end(), intersection.end() + tail_size);
 108     }
 109     assert(mr.contains(head) && mr.contains(tail), "Sanity");
 110 
 111     if (UseNUMA) {
 112       numa_setup_pages(head, clear_space);
 113       numa_setup_pages(tail, clear_space);
 114     }
 115 
 116     if (AlwaysPreTouch) {
 117       pretouch_pages(head);
 118       pretouch_pages(tail);
 119     }
 120 
 121     // Remember where we stopped so that we can continue later.
 122     set_last_setup_region(MemRegion(head.start(), tail.end()));
 123   }
 124 
 125   set_bottom(mr.start());
 126   set_end(mr.end());
 127 
 128   if (clear_space) {
 129     clear(mangle_space);
 130   }
 131 }
 132 
 133 void MutableSpace::clear(bool mangle_space) {
 134   set_top(bottom());
 135   if (ZapUnusedHeapArea && mangle_space) {
 136     mangle_unused_area();
 137   }
 138 }
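
In the current version above, initialize() pre-touches the head and tail regions on the calling thread, and when NUMASpaceResizeRate is set (and AlwaysPreTouch is off) it limits how many words of head and tail have their pages set up per resize. A worked sketch of that throttling arithmetic, using made-up numbers (NUMASpaceResizeRate = 2 MB, an 8 MB head, a 24 MB tail, 64-bit words so LogBytesPerWord == 3) and HotSpot's M, MIN2 and LogBytesPerWord helpers; this is illustrative only, not code from the patch:

    // Illustrative values only.
    const float setup_rate_words = (2 * M) >> LogBytesPerWord;   // 256K words allowed per resize
    size_t head_size = (8 * M)  >> LogBytesPerWord;              // 1M words
    size_t tail_size = (24 * M) >> LogBytesPerWord;              // 3M words
    const size_t change_size = head_size + tail_size;            // 4M words

    // Each side keeps a share of the allowed rate proportional to its size,
    // capped at its own size: head -> 256K * 1/4 = 64K words, tail -> 256K * 3/4
    // = 192K words, so only 256K of the 4M candidate words are set up this time.
    head_size = MIN2((size_t)(setup_rate_words * head_size / change_size), head_size);
    tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size), tail_size);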




  43 MutableSpace::~MutableSpace() {
  44   delete _mangler;
  45 }
  46 
  47 void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  48   if (!mr.is_empty()) {
  49     size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
  50     HeapWord *start = align_up(mr.start(), page_size);
  51     HeapWord *end =   align_down(mr.end(), page_size);
  52     if (end > start) {
  53       size_t size = pointer_delta(end, start, sizeof(char));
  54       if (clear_space) {
  55         // Prefer page reallocation to migration.
  56         os::free_memory((char*)start, size, page_size);
  57       }
  58       os::numa_make_global((char*)start, size);
  59     }
  60   }
  61 }
  62 
  63 class PGCPretouchTask : public AbstractGangTask {
  64   char* volatile _cur_addr;
  65   char* const _start_addr;
  66   char* const _end_addr;
  67   size_t _page_size;
  68   uint _total_workers;
  69 
  70 public:
  71   PGCPretouchTask(MemRegion mr, size_t page_size) :
  72     AbstractGangTask("ParallelGC PreTouch"),
  73     _cur_addr((char*)mr.start()),
  74     _start_addr((char*)mr.start()),
  75     _end_addr((char*)mr.end()),
  76     _page_size(0), _total_workers(1) {
  77 #ifdef LINUX
  78     _page_size = UseTransparentHugePages ? (size_t)os::vm_page_size() : page_size;
  79 #else
  80     _page_size = page_size;
  81 #endif
  82   }
  83 
  84   virtual void work(uint worker_id) {
  85     // Each worker's share of the region, aligned down to the page size but
  86     // never smaller than a single page.
  87     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
  88 
  89     // Claim disjoint chunks by atomically bumping _cur_addr and keep
  90     // claiming until the whole region is covered, so the remainder left
  91     // over by the align_down() in chunk_size() is pre-touched as well.
  92     while (true) {
  93       char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
  94       if (touch_addr >= _end_addr) {
  95         return;
  96       }
  97       char* end_addr = MIN2(touch_addr + actual_chunk_size, _end_addr);
  98       os::pretouch_memory(touch_addr, end_addr, _page_size);
  99     }
 100   }
 101 
 102   void set_total_workers(uint total_workers) { _total_workers = total_workers; }
 103 
 104   size_t chunk_size() const { return align_down((_end_addr - _start_addr) / _total_workers, _page_size); }
 105 
 106   static void pretouch_pages(MemRegion mr) {
 107     os::pretouch_memory(mr.start(), mr.end());
 108   }
 109 };
 110 
 111 void MutableSpace::initialize(MemRegion mr,
 112                               bool clear_space,
 113                               bool mangle_space,
 114                               bool setup_pages,
 115                               WorkGang* pretouch_gang) {
 116 
 117   assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
 118          "invalid space boundaries");
 119 
 120   if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
 121     // The space may move left and right or expand/shrink.
 122     // We'd like to enforce the desired page placement.
 123     MemRegion head, tail;
 124     if (last_setup_region().is_empty()) {
 125       // If it's the first initialization don't limit the amount of work.
 126       head = mr;
 127       tail = MemRegion(mr.end(), mr.end());
 128     } else {
 129       // Is there an intersection with the address space?
 130       MemRegion intersection = last_setup_region().intersection(mr);
 131       if (intersection.is_empty()) {
 132         intersection = MemRegion(mr.end(), mr.end());
 133       }
 134       // All the sizes below are in words.
 135       size_t head_size = 0, tail_size = 0;


 142       // Limit the amount of page manipulation if necessary.
 143       if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
 144         const size_t change_size = head_size + tail_size;
 145         const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
 146         head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
 147                          head_size);
 148         tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
 149                          tail_size);
 150       }
 151       head = MemRegion(intersection.start() - head_size, intersection.start());
 152       tail = MemRegion(intersection.end(), intersection.end() + tail_size);
 153     }
 154     assert(mr.contains(head) && mr.contains(tail), "Sanity");
 155 
 156     if (UseNUMA) {
 157       numa_setup_pages(head, clear_space);
 158       numa_setup_pages(tail, clear_space);
 159     }
 160 
 161     if (AlwaysPreTouch) {
 162 
 163       size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 164       PGCPretouchTask pretouch_task(head, page_size);
 165 
 166       if (pretouch_gang != NULL) {
 167         uint num_workers = pretouch_gang->total_workers();
 168         pretouch_task.set_total_workers(num_workers);
 169 
 170         log_debug(gc, heap)("Running %s with %u workers for pre-touching " SIZE_FORMAT "B.",
 171                         pretouch_task.name(), num_workers, head.byte_size());
 172 
 173         pretouch_gang->run_task(&pretouch_task, num_workers);
 174 
 175       } else {
 176 
 177         if (head.byte_size() != 0) {
 178           log_debug(gc, heap)("Running %s with 1 thread for pre-touching " SIZE_FORMAT "B.",
 179                                pretouch_task.name(), head.byte_size());
 180           PGCPretouchTask::pretouch_pages(head);
 181         }
 182       }
 183 
 184       if (tail.byte_size() != 0) {
 185         log_debug(gc, heap)("Running %s with 1 thread for pre-touching " SIZE_FORMAT "B.",
 186                              pretouch_task.name(), tail.byte_size());
 187         PGCPretouchTask::pretouch_pages(tail);
 188       }
 189     }
 190 
 191     // Remember where we stopped so that we can continue later.
 192     set_last_setup_region(MemRegion(head.start(), tail.end()));
 193   }
 194 
 195   set_bottom(mr.start());
 196   set_end(mr.end());
 197 
 198   if (clear_space) {
 199     clear(mangle_space);
 200   }
 201 }
 202 
 203 void MutableSpace::clear(bool mangle_space) {
 204   set_top(bottom());
 205   if (ZapUnusedHeapArea && mangle_space) {
 206     mangle_unused_area();
 207   }
 208 }
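
The proposed version above replaces the single-threaded os::pretouch_memory() call for the head region with a PGCPretouchTask that a WorkGang can run: every worker repeatedly claims a page-aligned chunk by atomically bumping _cur_addr and pre-touches only that chunk, while the tail (and the head when no gang is passed in) is still pre-touched on the calling thread. The standalone program below is a minimal sketch of that claiming scheme using byte offsets instead of heap addresses; the region size, page size, worker count and all names are made up for illustration and are not part of the patch:

    // chunk_claim_sketch.cpp -- illustrates the chunk claiming used by
    // PGCPretouchTask: workers grab disjoint, page-aligned chunks of one
    // region by atomically bumping a shared cursor until it is covered.
    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
      const size_t region    = size_t(1) << 30;  // hypothetical 1 GB head region
      const size_t page      = 4096;             // hypothetical small page size
      const unsigned workers = 8;                // hypothetical gang size

      // Equivalent of chunk_size(): an equal share per worker, aligned down to
      // the page size, but never smaller than one page (the MAX2 in work()).
      const size_t chunk = std::max(((region / workers) / page) * page, page);

      std::atomic<size_t> cur(0);                // stands in for _cur_addr
      std::vector<std::thread> gang;
      for (unsigned id = 0; id < workers; id++) {
        gang.emplace_back([&, id]() {
          while (true) {
            size_t from = cur.fetch_add(chunk);            // claim one chunk
            if (from >= region) {
              return;                                      // nothing left
            }
            size_t to = std::min(from + chunk, region);    // clamp last chunk
            // The real task pre-touches [from, to) via os::pretouch_memory().
            std::printf("worker %u claims [%zu, %zu)\n", id, from, to);
          }
        });
      }
      for (std::thread& t : gang) {
        t.join();
      }
      return 0;
    }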

