/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

  25 #include "precompiled.hpp"
  26 #include "gc/parallel/mutableSpace.hpp"
  27 #include "gc/shared/spaceDecorator.hpp"
  28 #include "memory/iterator.inline.hpp"
  29 #include "memory/universe.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/safepoint.hpp"
  33 #include "runtime/thread.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/macros.hpp"
  36 
MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _alignment(alignment), _top(NULL) {
  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

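// Set up NUMA page placement for the given region. Only whole pages that lie
// inside mr are processed; if the space is being cleared anyway, the pages
// are released first so the OS can reallocate them locally rather than
// migrate their contents.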
void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  if (!mr.is_empty()) {
    size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
    HeapWord *start = align_up(mr.start(), page_size);
    HeapWord *end =   align_down(mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}

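// Touch every page in the region so the OS backs it with physical memory up
// front instead of faulting pages in on first use.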
void MutableSpace::pretouch_pages(MemRegion mr) {
  os::pretouch_memory(mr.start(), mr.end());
}

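// Initialize the space to cover mr. When page setup is requested, NUMA
// placement and pretouching are applied only to the parts of mr that the
// previous call did not already set up (the head and tail on either side of
// the intersection with last_setup_region()), so repeated resizes do a
// bounded amount of page work.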
void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization, don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary. NUMASpaceResizeRate
      // bounds the number of bytes processed per collection; convert it to
      // words and split the budget between head and tail in proportion to
      // their sizes.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  set_end(mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

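// Debug-only support for mangling: under ZapUnusedHeapArea, the unused words
// of the space are filled with a recognizable pattern so that stray reads or
// writes into free space are easy to spot.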
#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

// This version requires locking.
HeapWord* MutableSpace::allocate(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
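// Concurrent allocators race to bump the top pointer with a CAS; a loser
// simply retries with the freshly observed top until it either wins the race
// or the space runs out.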
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // cmpxchg returns the value it found at top_addr(): if that is obj,
      // the exchange succeeded; otherwise another thread moved top first
      // and we must retry with the new value.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}
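
// A caller typically falls back to a slower path when the space is full.
// A minimal sketch (illustrative only, not an actual call site):
//
//   HeapWord* p = space->cas_allocate(word_size);
//   if (p == NULL) {
//     // expand the space or trigger a collection, then retry
//   }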

// Try to deallocate previous allocation. Returns true upon success.
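// This succeeds only if obj + size is still the current top, i.e. no other
// thread has allocated behind it in the meantime; otherwise top is left
// unchanged.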
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
}

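// Iteration walks the objects between bottom() and top(), using each
// object's size to find the next one; the region must therefore be parsable
// (no holes or partially initialized objects).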
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(oop(p));
    p += oop(p)->size();
  }
}

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
                 p2i(bottom()), p2i(top()), p2i(end()));
}

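// Verify each object in the space and check that object sizes account for
// every word between bottom() and top().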
void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  while (p < t) {
    oopDesc::verify(oop(p));
    p += oop(p)->size();
  }
  guarantee(p == t, "end of last object must match end of space");
}