/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableSpace.cpp.incl"

MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
  assert(MutableSpace::alignment() >= 0 &&
         MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

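// Set up NUMA page placement for the page-aligned part of mr: if the space is
// being cleared, release the underlying pages first (preferring reallocation
// to migration), then make the range global so it is usable from all nodes.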
void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  if (!mr.is_empty()) {
    size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
    HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
    HeapWord *end = (HeapWord*)round_down((intptr_t) mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}

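// Touch one byte per page so the OS commits backing storage for the whole
// region up front rather than on first use.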
void MutableSpace::pretouch_pages(MemRegion mr) {
  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
    char t = *p; *p = t;
  }
}

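// Initialize the space over mr. When page setup is requested, only the head
// and tail not covered by the previous setup are placed and/or pretouched, so
// repeated resizes do not redo work on the unchanged middle of the space.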
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Does the new region intersect the one we set up last time?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
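      // For example, with NUMASpaceResizeRate == 1M (bytes) and 8-byte words,
      // at most 128K words of head plus tail are set up per call, split in
      // proportion to their sizes; the rest is left for later invocations.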
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  set_end(mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

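// Reset the allocation pointer to the bottom of the space and, when mangling
// is enabled and requested, mangle the now-unused area for debugging.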
void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

// This version requires locking.
HeapWord* MutableSpace::allocate(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
           "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is the value of top before the exchange attempt:
      //   if it equals obj, the exchange succeeded and top is now new_top;
      //   otherwise another thread moved top first and we must retry.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}

// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

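// A hedged sketch of how a caller typically pairs the two CAS operations
// above (not part of this class; install_forwarding() is a hypothetical
// placeholder for whatever racy step follows the allocation):
//
//   HeapWord* obj = space->cas_allocate(word_size);
//   if (obj != NULL && !install_forwarding(obj)) {
//     // Lost a race after allocating: try to give the block back. This only
//     // succeeds if no other thread has moved top past obj + word_size.
//     space->cas_deallocate(obj, word_size);
//   }
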
void MutableSpace::oop_iterate(OopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(oop(p));
    p += oop(p)->size();
  }
}

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
                 bottom(), top(), end());
}

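// Walk every object from bottom() to top(), verifying each one; the walk
// must land exactly on top() or the space is corrupt.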
void MutableSpace::verify(bool allow_dirty) {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}