src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
  26 #define SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
  27 
  28 #include "gc/parallel/parallelScavengeHeap.hpp"
  29 #include "gc/parallel/parMarkBitMap.inline.hpp"
  30 #include "gc/parallel/psParallelCompact.hpp"
  31 #include "gc/shared/collectedHeap.hpp"
  32 #include "oops/klass.hpp"
  33 #include "oops/oop.inline.hpp"
  34 
  35 inline bool PSParallelCompact::is_marked(oop obj) {
  36   return mark_bitmap()->is_marked(obj);
  37 }
  38 
  39 inline double PSParallelCompact::normal_distribution(double density) {
  40   assert(_dwl_initialized, "uninitialized");
  41   const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  42   return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
  43 }
  44 
  45 inline bool PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
  46                                                            idx_t bit) {
  47   assert(bit > 0, "cannot call this for the first bit/region");
  48   assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
  49          "sanity check");
  50 
  51   // Dead space crosses the boundary if (1) a partial object does not extend


  88 inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
  89   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
  90          "must move left or to a different space");
  91   assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
  92          "checking alignment");
  93 }
  94 #endif // ASSERT
  95 
  96 inline bool PSParallelCompact::mark_obj(oop obj) {
  97   const int obj_size = obj->size();
  98   if (mark_bitmap()->mark_obj(obj, obj_size)) {
  99     _summary_data.add_obj(obj, obj_size);
 100     return true;
 101   } else {
 102     return false;
 103   }
 104 }
 105 
 106 template <class T>
 107 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
 108   T heap_oop = oopDesc::load_heap_oop(p);
 109   if (!oopDesc::is_null(heap_oop)) {
 110     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
 111     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 112 
 113     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
 114     assert(new_obj != NULL,                    // is forwarding ptr?
 115            "should be forwarded");
 116     // Just always do the update unconditionally?
 117     if (new_obj != NULL) {
 118       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
 119              "should be in object space");
 120       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 121     }
 122   }
 123 }
 124 
 125 template <typename T>
 126 void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
 127   adjust_pointer(p, _cm);
 128 }
 129 
 130 inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
 131 inline void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 132 
 133 #endif // SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
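
A side note on normal_distribution() above: it evaluates a Gaussian density over the region density d. Writing mu for _dwl_mean, sigma for _dwl_std_dev and c for _dwl_first_term, the return value is

    f(d) = c \cdot \exp\left( -\frac{(d - \mu)^2}{2\sigma^2} \right)

which is the normal density provided c caches the normalization constant 1/(sigma * sqrt(2 pi)); that is an assumption here, since c is initialized elsewhere (in the dead-wood-limiter setup) and is not shown in this webrev. The listing above is the current version of the file; the listing below is the proposed version, which differs only in two added includes and the oop-access calls in adjust_pointer().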


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
  26 #define SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
  27 
  28 #include "gc/parallel/parallelScavengeHeap.hpp"
  29 #include "gc/parallel/parMarkBitMap.inline.hpp"
  30 #include "gc/parallel/psParallelCompact.hpp"
  31 #include "gc/shared/collectedHeap.hpp"
  32 #include "oops/access.inline.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/klass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 
  37 inline bool PSParallelCompact::is_marked(oop obj) {
  38   return mark_bitmap()->is_marked(obj);
  39 }
  40 
  41 inline double PSParallelCompact::normal_distribution(double density) {
  42   assert(_dwl_initialized, "uninitialized");
  43   const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  44   return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
  45 }
  46 
  47 inline bool PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
  48                                                            idx_t bit) {
  49   assert(bit > 0, "cannot call this for the first bit/region");
  50   assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
  51          "sanity check");
  52 
  53   // Dead space crosses the boundary if (1) a partial object does not extend


  90 inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
  91   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
  92          "must move left or to a different space");
  93   assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
  94          "checking alignment");
  95 }
  96 #endif // ASSERT
  97 
  98 inline bool PSParallelCompact::mark_obj(oop obj) {
  99   const int obj_size = obj->size();
 100   if (mark_bitmap()->mark_obj(obj, obj_size)) {
 101     _summary_data.add_obj(obj, obj_size);
 102     return true;
 103   } else {
 104     return false;
 105   }
 106 }
 107 
 108 template <class T>
 109 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
 110   T heap_oop = RawAccess<>::oop_load(p);
 111   if (!CompressedOops::is_null(heap_oop)) {
 112     oop obj = CompressedOops::decode_not_null(heap_oop);
 113     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 114 
 115     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
 116     assert(new_obj != NULL,                    // is forwarding ptr?
 117            "should be forwarded");
 118     // Just always do the update unconditionally?
 119     if (new_obj != NULL) {
 120       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
 121              "should be in object space");
 122       RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 123     }
 124   }
 125 }
 126 
 127 template <typename T>
 128 void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
 129   adjust_pointer(p, _cm);
 130 }
 131 
 132 inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
 133 inline void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 134 
 135 #endif // SHARE_VM_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
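
The substance of the change is in adjust_pointer(): the old oopDesc heap-oop helpers are replaced with the CompressedOops and RawAccess APIs, which is why oops/access.inline.hpp and oops/compressedOops.inline.hpp are now included. A sketch of the call mapping, taken directly from the two listings above (old call sites on the left, their replacements on the right):

    // old (oopDesc-based)                                  // new (Access API)
    oopDesc::load_heap_oop(p)                               RawAccess<>::oop_load(p)
    oopDesc::is_null(heap_oop)                              CompressedOops::is_null(heap_oop)
    oopDesc::decode_heap_oop_not_null(heap_oop)             CompressedOops::decode_not_null(heap_oop)
    oopDesc::encode_store_heap_oop_not_null(p, new_obj)     RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj)

RawAccess<> is the raw (barrier-free) variant of the Access API; with a narrowOop* field it performs the compression and decompression that the old encode/decode helpers spelled out explicitly, so the behavior of adjust_pointer() is intended to be unchanged.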