src/hotspot/share/gc/parallel/mutableNUMASpace.cpp (old)

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/parallel/mutableNUMASpace.hpp"
  27 #include "gc/shared/collectedHeap.hpp"

  28 #include "gc/shared/spaceDecorator.hpp"
  29 #include "memory/allocation.inline.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/thread.inline.hpp"
  33 #include "runtime/threadSMR.hpp"
  34 #include "utilities/align.hpp"
  35 
  36 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
  37   _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
  38   _page_size = os::vm_page_size();
  39   _adaptation_cycles = 0;
  40   _samples_count = 0;
  41 
  42 #ifdef LINUX
  43   // Changing the page size can lead to freeing of memory. When using large pages
  44   // and the memory has been both reserved and committed, Linux does not support
  45   // freeing parts of it.
  46   if (UseLargePages && !os::can_commit_large_page_memory()) {
  47     _must_use_large_pages = true;


  78 }
  79 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  80   // This method should do nothing.
  81 }
  82 void MutableNUMASpace::check_mangled_unused_area_complete() {
  83   // This method should do nothing.
  84 }
  85 #endif  // NOT_PRODUCT
  86 
  87 // There may be unallocated holes in the middle chunks
  88 // that should be filled with dead objects to ensure parsability.
  89 void MutableNUMASpace::ensure_parsability() {
  90   for (int i = 0; i < lgrp_spaces()->length(); i++) {
  91     LGRPSpace *ls = lgrp_spaces()->at(i);
  92     MutableSpace *s = ls->space();
  93     if (s->top() < top()) { // For all spaces preceding the one containing top()
  94       if (s->free_in_words() > 0) {
  95         HeapWord* cur_top = s->top();
  96         size_t words_left_to_fill = pointer_delta(s->end(), s->top());
  97         while (words_left_to_fill > 0) {
  98           size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
  99           assert(words_to_fill >= CollectedHeap::min_fill_size(),
 100                  "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
 101                  words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
 102           CollectedHeap::fill_with_object(cur_top, words_to_fill);
 103           if (!os::numa_has_static_binding()) {
 104             size_t touched_words = words_to_fill;
 105 #ifndef ASSERT
 106             if (!ZapUnusedHeapArea) {
 107               touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
 108                 touched_words);
 109             }
 110 #endif
 111             MemRegion invalid;
 112             HeapWord *crossing_start = align_up(cur_top, os::vm_page_size());
 113             HeapWord *crossing_end = align_down(cur_top + touched_words, os::vm_page_size());
 114             if (crossing_start != crossing_end) {
 115               // If the object header crossed a small page boundary, mark the area
 116               // as invalid, rounded out to page_size().
 117               HeapWord *start = MAX2(align_down(cur_top, page_size()), s->bottom());
 118               HeapWord *end = MIN2(align_up(cur_top + touched_words, page_size()), s->end());
 119               invalid = MemRegion(start, end);
 120             }
 121 
 122             ls->add_invalid_region(invalid);
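
The fill loop above carves each hole into dead objects of at most
CollectedHeap::filler_array_max_size() words. A standalone sketch of that
chunking, with hypothetical names (fill_chunked, plant) standing in for the
JDK calls:

  #include <algorithm>
  #include <cassert>
  #include <cstddef>

  // Split a fill of `words` into filler objects no larger than max_fill.
  // The assert mirrors the JDK invariant: no piece may fall below the
  // minimum filler-object size, or it could not be made parseable.
  void fill_chunked(size_t words, size_t min_fill, size_t max_fill,
                    void (*plant)(size_t offset, size_t words)) {
    size_t offset = 0;
    while (words > 0) {
      size_t piece = std::min(words, max_fill);
      assert(piece >= min_fill && "remainder too small for a filler object");
      plant(offset, piece);  // plant one dead object covering `piece` words
      offset += piece;
      words -= piece;
    }
  }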


 718     s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
 719 
 720     set_adaptation_cycles(samples_count());
 721   }
 722 }
 723 
 724 // Set the top of the whole space.
 725 // Mark the holes in chunks below the top() as invalid.
 726 void MutableNUMASpace::set_top(HeapWord* value) {
 727   bool found_top = false;
 728   for (int i = 0; i < lgrp_spaces()->length();) {
 729     LGRPSpace *ls = lgrp_spaces()->at(i);
 730     MutableSpace *s = ls->space();
 731     HeapWord *top = MAX2(align_down(s->top(), page_size()), s->bottom());
 732 
 733     if (s->contains(value)) {
 734       // Check whether setting the chunk's top to the given value would create a hole
 735       // smaller than a minimal object; the last chunk is exempt, since a hole there is harmless.
 736       if (i < lgrp_spaces()->length() - 1) {
 737         size_t remainder = pointer_delta(s->end(), value);
 738         const size_t min_fill_size = CollectedHeap::min_fill_size();
 739         if (remainder < min_fill_size && remainder > 0) {
 740           // Add a minimum size filler object; it will cross the chunk boundary.
 741           CollectedHeap::fill_with_object(value, min_fill_size);
 742           value += min_fill_size;
 743           assert(!s->contains(value), "Should be in the next chunk");
 744           // Restart the loop from the same chunk, since the value has moved
 745           // to the next one.
 746           continue;
 747         }
 748       }
 749 
 750       if (!os::numa_has_static_binding() && top < value && top < s->end()) {
 751         ls->add_invalid_region(MemRegion(top, value));
 752       }
 753       s->set_top(value);
 754       found_top = true;
 755     } else {
 756       if (found_top) {
 757         s->set_top(s->bottom());
 758       } else {
 759           if (!os::numa_has_static_binding() && top < s->end()) {
 760             ls->add_invalid_region(MemRegion(top, s->end()));
 761           }
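
The filler-crossing case above deserves a second look: if the requested top
would leave a gap smaller than CollectedHeap::min_fill_size() before a
non-last chunk's end, a minimum-size filler is planted at the new top and
top advances past it, landing in the next chunk by design. A sketch under
those assumptions (hypothetical names, not JDK code):

  #include <cstddef>

  // Returns the possibly adjusted top. A gap in (0, min_fill) words cannot
  // hold any filler object, so a minimum-size filler is planted at `top`
  // and top advances past it, crossing the chunk boundary on purpose.
  size_t adjusted_top(size_t top, size_t chunk_end, size_t min_fill,
                      void (*plant)(size_t at, size_t words)) {
    size_t gap = chunk_end - top;
    if (gap > 0 && gap < min_fill) {
      plant(top, min_fill);
      top += min_fill;
    }
    return top;
  }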


 792   int lgrp_id = thr->lgrp_id();
 793   if (lgrp_id == -1 || !os::numa_has_group_homing()) {
 794     lgrp_id = os::numa_get_group_id();
 795     thr->set_lgrp_id(lgrp_id);
 796   }
 797 
 798   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
 799 
 800   // It is possible that a new CPU has been hotplugged and
 801   // we haven't reshaped the space accordingly.
 802   if (i == -1) {
 803     i = os::random() % lgrp_spaces()->length();
 804   }
 805 
 806   LGRPSpace* ls = lgrp_spaces()->at(i);
 807   MutableSpace *s = ls->space();
 808   HeapWord *p = s->allocate(size);
 809 
 810   if (p != NULL) {
 811     size_t remainder = s->free_in_words();
 812     if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
 813       s->set_top(s->top() - size);
 814       p = NULL;
 815     }
 816   }
 817   if (p != NULL) {
 818     if (top() < s->top()) { // Keep _top updated.
 819       MutableSpace::set_top(s->top());
 820     }
 821   }
 822   // Make the page allocation happen here if there is no static binding.
 823   if (p != NULL && !os::numa_has_static_binding()) {
 824     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
 825       *(int*)i = 0;
 826     }
 827   }
 828   if (p == NULL) {
 829     ls->set_allocation_failed();
 830   }
 831   return p;
 832 }
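
The zero-writing loop near the end of allocate() is first-touch placement:
without static binding, the OS assigns physical pages on first access, so
touching one word per page from the allocating thread lands those pages on
that thread's NUMA node. The idea in isolation (a sketch, not JDK code):

  #include <cstddef>

  // Touch one word per page so each page is faulted in, and therefore
  // physically placed, on the NUMA node of the calling thread. The stored
  // zeros are irrelevant; only the page faults matter.
  void first_touch(char* base, size_t bytes, size_t page_size) {
    for (char* p = base; p < base + bytes; p += page_size) {
      *p = 0;
    }
  }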


 834 // This version is lock-free.
 835 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
 836   Thread* thr = Thread::current();
 837   int lgrp_id = thr->lgrp_id();
 838   if (lgrp_id == -1 || !os::numa_has_group_homing()) {
 839     lgrp_id = os::numa_get_group_id();
 840     thr->set_lgrp_id(lgrp_id);
 841   }
 842 
 843   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
 844   // It is possible that a new CPU has been hotplugged and
 845   // we haven't reshaped the space accordingly.
 846   if (i == -1) {
 847     i = os::random() % lgrp_spaces()->length();
 848   }
 849   LGRPSpace *ls = lgrp_spaces()->at(i);
 850   MutableSpace *s = ls->space();
 851   HeapWord *p = s->cas_allocate(size);
 852   if (p != NULL) {
 853     size_t remainder = pointer_delta(s->end(), p + size);
 854     if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
 855       if (s->cas_deallocate(p, size)) {
 856         // We were the last to allocate and created a fragment less than
 857         // a minimal object.
 858         p = NULL;
 859       } else {
 860         guarantee(false, "Deallocation should always succeed");
 861       }
 862     }
 863   }
 864   if (p != NULL) {
 865     HeapWord* cur_top, *cur_chunk_top = p + size;
 866     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
 867       if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
 868         break;
 869       }
 870     }
 871   }
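
The "keep _top updated" loop above is a lock-free monotonic-max update:
keep retrying the compare-and-swap until either our chunk top is published
or another thread has published one at or beyond it. The same pattern,
sketched with std::atomic standing in for Atomic::cmpxchg:

  #include <atomic>

  // Raise `top` to at least `candidate`. On CAS failure,
  // compare_exchange_weak reloads the observed value into `cur`, so the
  // loop exits once top is already >= candidate or our CAS lands.
  template <typename T>
  void update_max(std::atomic<T>& top, T candidate) {
    T cur = top.load();
    while (cur < candidate && !top.compare_exchange_weak(cur, candidate)) {
      // retry with the freshly observed value
    }
  }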
 872 
 873   // Make the page allocation happen here if there is no static binding.
 874   if (p != NULL && !os::numa_has_static_binding()) {


src/hotspot/share/gc/parallel/mutableNUMASpace.cpp (new)

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/parallel/mutableNUMASpace.hpp"
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "gc/shared/fill.hpp"
  29 #include "gc/shared/spaceDecorator.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/thread.inline.hpp"
  34 #include "runtime/threadSMR.hpp"
  35 #include "utilities/align.hpp"
  36 
  37 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
  38   _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
  39   _page_size = os::vm_page_size();
  40   _adaptation_cycles = 0;
  41   _samples_count = 0;
  42 
  43 #ifdef LINUX
  44   // Changing the page size can lead to freeing of memory. When using large pages
  45   // and the memory has been both reserved and committed, Linux does not support
  46   // freeing parts of it.
  47   if (UseLargePages && !os::can_commit_large_page_memory()) {
  48     _must_use_large_pages = true;


  79 }
  80 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  81   // This method should do nothing.
  82 }
  83 void MutableNUMASpace::check_mangled_unused_area_complete() {
  84   // This method should do nothing.
  85 }
  86 #endif  // NOT_PRODUCT
  87 
  88 // There may be unallocated holes in the middle chunks
  89 // that should be filled with dead objects to ensure parsability.
  90 void MutableNUMASpace::ensure_parsability() {
  91   for (int i = 0; i < lgrp_spaces()->length(); i++) {
  92     LGRPSpace *ls = lgrp_spaces()->at(i);
  93     MutableSpace *s = ls->space();
  94     if (s->top() < top()) { // For all spaces preceding the one containing top()
  95       if (s->free_in_words() > 0) {
  96         HeapWord* cur_top = s->top();
  97         size_t words_left_to_fill = pointer_delta(s->end(), s->top());
  98         while (words_left_to_fill > 0) {
  99           size_t words_to_fill = MIN2(words_left_to_fill, Fill::max_size());
 100           assert(words_to_fill >= Fill::min_size(),
 101                  "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
 102                  words_to_fill, words_left_to_fill, Fill::max_size());
 103           Fill::range(cur_top, words_to_fill);
 104           if (!os::numa_has_static_binding()) {
 105             size_t touched_words = words_to_fill;
 106 #ifndef ASSERT
 107             if (!ZapUnusedHeapArea) {
 108               touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
 109                 touched_words);
 110             }
 111 #endif
 112             MemRegion invalid;
 113             HeapWord *crossing_start = align_up(cur_top, os::vm_page_size());
 114             HeapWord *crossing_end = align_down(cur_top + touched_words, os::vm_page_size());
 115             if (crossing_start != crossing_end) {
 116               // If the object header crossed a small page boundary, mark the area
 117               // as invalid, rounded out to page_size().
 118               HeapWord *start = MAX2(align_down(cur_top, page_size()), s->bottom());
 119               HeapWord *end = MIN2(align_up(cur_top + touched_words, page_size()), s->end());
 120               invalid = MemRegion(start, end);
 121             }
 122 
 123             ls->add_invalid_region(invalid);
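
When the filler's header may have touched more than one small page, the
touched range is rounded out to the space's page size, clamped to the
chunk, and recorded as invalid so those pages can be re-placed on the right
node later. A sketch of that rounding (hypothetical names; page arithmetic
only, not JDK code):

  #include <cstddef>

  struct Region { size_t start, end; };  // empty when start == end

  // Round the touched range [top, top + touched) out to `page` boundaries
  // and clamp it to the chunk's [bottom, end).
  Region touched_pages(size_t top, size_t touched, size_t page,
                       size_t bottom, size_t end) {
    size_t lo = top / page * page;                          // align down
    size_t hi = (top + touched + page - 1) / page * page;   // align up
    if (lo < bottom) lo = bottom;
    if (hi > end)    hi = end;
    return Region{lo, hi};
  }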


 719     s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
 720 
 721     set_adaptation_cycles(samples_count());
 722   }
 723 }
 724 
 725 // Set the top of the whole space.
 726 // Mark the holes in chunks below the top() as invalid.
 727 void MutableNUMASpace::set_top(HeapWord* value) {
 728   bool found_top = false;
 729   for (int i = 0; i < lgrp_spaces()->length();) {
 730     LGRPSpace *ls = lgrp_spaces()->at(i);
 731     MutableSpace *s = ls->space();
 732     HeapWord *top = MAX2(align_down(s->top(), page_size()), s->bottom());
 733 
 734     if (s->contains(value)) {
 735       // Check whether setting the chunk's top to the given value would create a hole
 736       // smaller than a minimal object; the last chunk is exempt, since a hole there is harmless.
 737       if (i < lgrp_spaces()->length() - 1) {
 738         size_t remainder = pointer_delta(s->end(), value);
 739         const size_t min_fill_size = Fill::min_size();
 740         if (remainder < min_fill_size && remainder > 0) {
 741           // Add a minimum size filler object; it will cross the chunk boundary.
 742           Fill::range(value, min_fill_size);
 743           value += min_fill_size;
 744           assert(!s->contains(value), "Should be in the next chunk");
 745           // Restart the loop from the same chunk, since the value has moved
 746           // to the next one.
 747           continue;
 748         }
 749       }
 750 
 751       if (!os::numa_has_static_binding() && top < value && top < s->end()) {
 752         ls->add_invalid_region(MemRegion(top, value));
 753       }
 754       s->set_top(value);
 755       found_top = true;
 756     } else {
 757       if (found_top) {
 758         s->set_top(s->bottom());
 759       } else {
 760           if (!os::numa_has_static_binding() && top < s->end()) {
 761             ls->add_invalid_region(MemRegion(top, s->end()));
 762           }


 793   int lgrp_id = thr->lgrp_id();
 794   if (lgrp_id == -1 || !os::numa_has_group_homing()) {
 795     lgrp_id = os::numa_get_group_id();
 796     thr->set_lgrp_id(lgrp_id);
 797   }
 798 
 799   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
 800 
 801   // It is possible that a new CPU has been hotplugged and
 802   // we haven't reshaped the space accordingly.
 803   if (i == -1) {
 804     i = os::random() % lgrp_spaces()->length();
 805   }
 806 
 807   LGRPSpace* ls = lgrp_spaces()->at(i);
 808   MutableSpace *s = ls->space();
 809   HeapWord *p = s->allocate(size);
 810 
 811   if (p != NULL) {
 812     size_t remainder = s->free_in_words();
 813     if (remainder < Fill::min_size() && remainder > 0) {
 814       s->set_top(s->top() - size);
 815       p = NULL;
 816     }
 817   }
 818   if (p != NULL) {
 819     if (top() < s->top()) { // Keep _top updated.
 820       MutableSpace::set_top(s->top());
 821     }
 822   }
 823   // Make the page allocation happen here if there is no static binding.
 824   if (p != NULL && !os::numa_has_static_binding()) {
 825     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
 826       *(int*)i = 0;
 827     }
 828   }
 829   if (p == NULL) {
 830     ls->set_allocation_failed();
 831   }
 832   return p;
 833 }
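
Worth contrasting with the lock-free path below: allocate() runs with the
heap locked, so an allocation that would strand a tail smaller than
Fill::min_size() is retracted by simply lowering top again. A sketch of
that check (hypothetical names, not JDK code):

  #include <cstddef>

  struct Chunk { size_t top, end; };

  // With exclusive access, an allocation of `size` words that leaves a
  // free tail in (0, min_fill) is undone by moving top back down.
  bool retract_if_unfillable(Chunk& c, size_t size, size_t min_fill) {
    size_t tail = c.end - c.top;
    if (tail > 0 && tail < min_fill) {
      c.top -= size;
      return true;  // caller treats the allocation as failed
    }
    return false;
  }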


 835 // This version is lock-free.
 836 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
 837   Thread* thr = Thread::current();
 838   int lgrp_id = thr->lgrp_id();
 839   if (lgrp_id == -1 || !os::numa_has_group_homing()) {
 840     lgrp_id = os::numa_get_group_id();
 841     thr->set_lgrp_id(lgrp_id);
 842   }
 843 
 844   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
 845   // It is possible that a new CPU has been hotplugged and
 846   // we haven't reshaped the space accordingly.
 847   if (i == -1) {
 848     i = os::random() % lgrp_spaces()->length();
 849   }
 850   LGRPSpace *ls = lgrp_spaces()->at(i);
 851   MutableSpace *s = ls->space();
 852   HeapWord *p = s->cas_allocate(size);
 853   if (p != NULL) {
 854     size_t remainder = pointer_delta(s->end(), p + size);
 855     if (remainder < Fill::min_size() && remainder > 0) {
 856       if (s->cas_deallocate(p, size)) {
 857         // We were the last to allocate and created a fragment less than
 858         // a minimal object.
 859         p = NULL;
 860       } else {
 861         guarantee(false, "Deallocation should always succeed");
 862       }
 863     }
 864   }
 865   if (p != NULL) {
 866     HeapWord* cur_top, *cur_chunk_top = p + size;
 867     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
 868       if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
 869         break;
 870       }
 871     }
 872   }
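
The rollback above can only be attempted lock-free: cas_deallocate succeeds
exactly when top still equals p + size, i.e. nobody allocated after us; and
since the leftover tail is smaller than any allocatable object, nobody
could have, which is why a failure trips the guarantee. A model of that
contract with std::atomic (an assumption about cas_deallocate's semantics,
not a copy of it):

  #include <atomic>
  #include <cstddef>

  // Undo an allocation only if we are still the most recent allocator:
  // the CAS moves top from p + bytes back to p, and fails if any later
  // allocation has advanced top past us.
  bool cas_deallocate(std::atomic<char*>& top, char* p, size_t bytes) {
    char* expected = p + bytes;
    return top.compare_exchange_strong(expected, p);
  }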
 873 
 874   // Make the page allocation happen here if there is no static binding.
 875   if (p != NULL && !os::numa_has_static_binding()) {

