src/share/vm/memory/cardTableModRefBS.cpp


Old version of the file:

   1 /*
   2  * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  36 #include "services/memTracker.hpp"
  37 #include "utilities/macros.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_LIR.hpp"
  40 #include "c1/c1_LIRGenerator.hpp"
  41 #endif
  42 
  43 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
  44 // enumerate ref fields that have been modified (since the last
  45 // enumeration.)
  46 
  47 size_t CardTableModRefBS::compute_byte_map_size()
  48 {
  49   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
  50                                         "uninitialized, check declaration order");
  51   assert(_page_size != 0, "uninitialized, check declaration order");
  52   const size_t granularity = os::vm_allocation_granularity();
  53   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
  54 }
  55 
  56 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap) :
  57   ModRefBarrierSet(),
  58   _whole_heap(whole_heap),
  59   _guard_index(0),
  60   _guard_region(),
  61   _last_valid_index(0),
  62   _page_size(os::vm_page_size()),
  63   _byte_map_size(0),
  64   _covered(NULL),
  65   _committed(NULL),
  66   _cur_covered_regions(0),
  67   _byte_map(NULL),
  68   byte_map_base(NULL),
  69   // LNC functionality
  70   _lowest_non_clean(NULL),
  71   _lowest_non_clean_chunk_size(NULL),
  72   _lowest_non_clean_base_chunk_index(NULL),
  73   _last_LNC_resizing_collection(NULL)
  74 {
  75   _kind = BarrierSet::CardTableModRef;
  76 
  77   assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  78   assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
  79 
  80   assert(card_size <= 512, "card_size must be less than 512"); // why?
  81 
  82   _covered   = new MemRegion[_max_covered_regions];
  83   if (_covered == NULL) {
  84     vm_exit_during_initialization("Could not allocate card table covered region set.");
  85   }
  86 }
  87 
  88 void CardTableModRefBS::initialize() {
  89   _guard_index = cards_required(_whole_heap.word_size()) - 1;
  90   _last_valid_index = _guard_index - 1;
  91 
  92   _byte_map_size = compute_byte_map_size();
  93 
  94   HeapWord* low_bound  = _whole_heap.start();
  95   HeapWord* high_bound = _whole_heap.end();
  96 


New version of the file:

   1 /*
   2  * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  36 #include "services/memTracker.hpp"
  37 #include "utilities/macros.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_LIR.hpp"
  40 #include "c1/c1_LIRGenerator.hpp"
  41 #endif
  42 
  43 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
  44 // enumerate ref fields that have been modified (since the last
  45 // enumeration.)
  46 
  47 size_t CardTableModRefBS::compute_byte_map_size()
  48 {
  49   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
  50                                         "uninitialized, check declaration order");
  51   assert(_page_size != 0, "uninitialized, check declaration order");
  52   const size_t granularity = os::vm_allocation_granularity();
  53   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
  54 }
  55 
  56 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind) :
  57   ModRefBarrierSet(kind),
  58   _whole_heap(whole_heap),
  59   _guard_index(0),
  60   _guard_region(),
  61   _last_valid_index(0),
  62   _page_size(os::vm_page_size()),
  63   _byte_map_size(0),
  64   _covered(NULL),
  65   _committed(NULL),
  66   _cur_covered_regions(0),
  67   _byte_map(NULL),
  68   byte_map_base(NULL),
  69   // LNC functionality
  70   _lowest_non_clean(NULL),
  71   _lowest_non_clean_chunk_size(NULL),
  72   _lowest_non_clean_base_chunk_index(NULL),
  73   _last_LNC_resizing_collection(NULL)
  74 {


  75   assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  76   assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
  77 
  78   assert(card_size <= 512, "card_size must be less than 512"); // why?
  79 
  80   _covered   = new MemRegion[_max_covered_regions];
  81   if (_covered == NULL) {
  82     vm_exit_during_initialization("Could not allocate card table covered region set.");
  83   }
  84 }
  85 
  86 void CardTableModRefBS::initialize() {
  87   _guard_index = cards_required(_whole_heap.word_size()) - 1;
  88   _last_valid_index = _guard_index - 1;
  89 
  90   _byte_map_size = compute_byte_map_size();
  91 
  92   HeapWord* low_bound  = _whole_heap.start();
  93   HeapWord* high_bound = _whole_heap.end();
  94 
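
The functional difference between the two versions is confined to the constructor: the new code takes a BarrierSet::Name parameter and forwards it to ModRefBarrierSet(kind), instead of assigning _kind = BarrierSet::CardTableModRef; in the constructor body. A minimal standalone sketch of that pattern follows; the class names, the enum, and the default argument are simplified stand-ins, not the actual HotSpot declarations.

#include <cstdio>

// Simplified stand-in for BarrierSet::Name.
enum Name { ModRef, CardTableModRef, CardTableExtension };

// Old shape: the base picks a placeholder and the derived body overwrites it.
struct BarrierSetOld {
  Name _kind;
  BarrierSetOld() : _kind(ModRef) {}
};
struct CardTableModRefBSOld : BarrierSetOld {
  CardTableModRefBSOld() { _kind = CardTableModRef; }  // fixed up after base init
};

// New shape: the concrete kind is threaded up the constructor chain, so _kind
// is correct as soon as the base subobject exists, and subclasses can reuse
// the same constructor while passing their own Name.
struct BarrierSetNew {
  Name _kind;
  explicit BarrierSetNew(Name kind) : _kind(kind) {}
};
struct CardTableModRefBSNew : BarrierSetNew {
  explicit CardTableModRefBSNew(Name kind = CardTableModRef)
      : BarrierSetNew(kind) {}
};

int main() {
  CardTableModRefBSNew bs;                       // kind = CardTableModRef
  CardTableModRefBSNew ext(CardTableExtension);  // subclass-style use of the parameter
  std::printf("%d %d\n", bs._kind, ext._kind);
  return 0;
}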

