src/share/vm/gc/cms/compactibleFreeListSpace.cpp

 912       size_t res = fc->size();
 913 
 914       // Bugfix for systems with weak memory model (PPC64/IA64). The
 915       // block's free bit was set and we have read the size of the
 916       // block. Acquire and check the free bit again. If the block is
 917       // still free, the read size is correct.
 918       OrderAccess::acquire();
 919 
 920       // If the object is still a free chunk, return the size, else it
 921       // has been allocated so try again.
 922       if (FreeChunk::indicatesFreeChunk(p)) {
 923         assert(res != 0, "Block size should not be 0");
 924         return res;
 925       }
 926     } else {
 927       // Ensure klass read before size.
 928       Klass* k = oop(p)->klass_or_null_acquire();
 929       if (k != NULL) {
 930         assert(k->is_klass(), "Should really be klass oop.");
 931         oop o = (oop)p;
 932         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 933 
 934         size_t res = o->size_given_klass(k);
 935         res = adjustObjectSize(res);
 936         assert(res != 0, "Block size should not be 0");
 937         return res;
 938       }
 939     }
 940   }
 941 }
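
The lock-free path above reads a free chunk's size and then re-checks the free bit under an acquire barrier; if the bit is still set, the size that was read cannot belong to a chunk that was reallocated in between. A minimal standalone sketch of the same idiom using C++11 atomics (ChunkHeader and read_size_if_free are hypothetical names, not HotSpot code; the real code reads through a volatile FreeChunk* and uses OrderAccess::acquire()):

#include <atomic>
#include <cstddef>

// Hypothetical chunk header. An allocator that takes the chunk overwrites
// 'size' and only then clears 'is_free'.
struct ChunkHeader {
  std::atomic<bool>        is_free;
  std::atomic<std::size_t> size;
};

// Returns the chunk size if the chunk was free across the whole read
// sequence, or 0 if it was (or became) allocated and the caller must retry.
std::size_t read_size_if_free(const ChunkHeader* c) {
  if (!c->is_free.load(std::memory_order_relaxed)) return 0;
  std::size_t s = c->size.load(std::memory_order_relaxed);
  // LoadLoad barrier, like OrderAccess::acquire() in the hunk above: the size
  // read cannot be reordered past the re-check of the free bit that follows.
  std::atomic_thread_fence(std::memory_order_acquire);
  if (c->is_free.load(std::memory_order_relaxed)) {
    return s;   // still free, so the size read belongs to a free chunk
  }
  return 0;     // concurrently allocated; the size read is untrustworthy
}
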
 942 
 943 // TODO: Now that is_parsable is gone, we should combine these two functions.
 944 // A variant of the above that uses the Printezis bits for
 945 // unparsable but allocated objects. This avoids any possible
 946 // stalls waiting for mutators to initialize objects, and is
 947 // thus potentially faster than the variant above. However,
 948 // this variant may return a zero size for a block that is
 949 // under mutation and for which a consistent size cannot be
 950 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 951 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 952                                                      const CMSCollector* c)


 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // free bit of the block was set and we have read the size of
 967       // the block. Acquire and check the free bit again. If the
 968       // block is still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       if (FreeChunk::indicatesFreeChunk(p)) {
 972         assert(res != 0, "Block size should not be 0");
 973         assert(loops == 0, "Should be 0");
 974         return res;
 975       }
 976     } else {
 977       // Ensure klass read before size.
 978       Klass* k = oop(p)->klass_or_null_acquire();
 979       if (k != NULL) {
 980         assert(k->is_klass(), "Should really be klass oop.");
 981         oop o = (oop)p;
 982         assert(o->is_oop(), "Should be an oop");
 983 
 984         size_t res = o->size_given_klass(k);
 985         res = adjustObjectSize(res);
 986         assert(res != 0, "Block size should not be 0");
 987         return res;
 988       } else {
 989         // May return 0 if P-bits not present.
 990         return c->block_size_if_printezis_bits(p);
 991       }
 992     }
 993     assert(loops == 0, "Can loop at most once");
 994     DEBUG_ONLY(loops++;)
 995   }
 996 }
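
The non-free branch in both functions leans on the allocator publishing the klass pointer last, so klass_or_null_acquire() returning non-NULL guarantees the header fields needed by size_given_klass() are visible; block_size_no_stall() simply falls back (possibly to a zero answer) instead of waiting when the klass is still NULL. A minimal sketch of that release/acquire publication idiom, with hypothetical names (ObjHeader, publish, size_no_stall):

#include <atomic>
#include <cstddef>

struct ObjHeader {
  std::atomic<const void*> klass;   // NULL until the object is initialized
  std::size_t              payload_words;
};

// Allocator/mutator side: fill in the header, then release-store the klass.
void publish(ObjHeader* o, const void* k, std::size_t words) {
  o->payload_words = words;
  o->klass.store(k, std::memory_order_release);  // makes the header visible
}

// Collector side: never blocks. Returns the size if the object is already
// published, otherwise 0 (where the real code consults
// block_size_if_printezis_bits()).
std::size_t size_no_stall(const ObjHeader* o) {
  const void* k = o->klass.load(std::memory_order_acquire);
  if (k == nullptr) return 0;   // not yet initialized; don't stall
  return o->payload_words;      // safe: acquire pairs with the release above
}
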
 997 
 998 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 999   NOT_PRODUCT(verify_objects_initialized());
1000   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1001   FreeChunk* fc = (FreeChunk*)p;
1002   if (fc->is_free()) {
1003     return fc->size();
1004   } else {
1005     // Ignore mark word because this may be a recently promoted
1006     // object whose mark word is used to chain together grey
1007     // objects (the last one would have a null value).
1008     assert(oop(p)->is_oop(true), "Should be an oop");
1009     return adjustObjectSize(oop(p)->size());
1010   }
1011 }
1012 
1013 // This implementation assumes that the property of "being an object" is
1014 // stable.  But being a free chunk may not be (because of parallel
1015 // promotion.)
1016 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1017   FreeChunk* fc = (FreeChunk*)p;
1018   assert(is_in_reserved(p), "Should be in space");
1019   if (FreeChunk::indicatesFreeChunk(p)) return false;
1020   Klass* k = oop(p)->klass_or_null_acquire();
1021   if (k != NULL) {
1022     // Ignore mark word because it may have been used to
1023     // chain together promoted objects (the last one
1024     // would have a null value).
1025     assert(oop(p)->is_oop(true), "Should be an oop");
1026     return true;
1027   } else {
1028     return false;  // Was not an object at the start of collection.
1029   }
1030 }
1031 
1032 // Check if the object is alive. This fact is checked either by consulting
1033 // the main marking bitmap in the sweeping phase or, if it's a permanent
1034 // generation and we're not in the sweeping phase, by checking the
1035 // perm_gen_verify_bit_map where we store the "deadness" information if
1036 // we did not sweep the perm gen in the most recent previous GC cycle.
1037 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1038   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1039          "Else races are possible");
1040   assert(block_is_obj(p), "The address should point to an object");
1041 
1042   // If we're sweeping, we use object liveness information from the main bit map
1043   // for both perm gen and old gen.
1044   // We don't need to lock the bitmap (live_map or dead_map below), because
1045   // EITHER we are in the middle of the sweeping phase, and the


1049   // NOTE: This method is also used by jmap where, if class unloading is
1050   // off, the results can return "false" for legitimate perm objects,
1051   // when we are not in the midst of a sweeping phase, which can result
1052   // in jmap not reporting certain perm gen objects. This will be moot
1053   // if/when the perm gen goes away in the future.
1054   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1055     CMSBitMap* live_map = _collector->markBitMap();
1056     return live_map->par_isMarked((HeapWord*) p);
1057   }
1058   return true;
1059 }
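
During sweeping, obj_is_alive() is a single lookup in the mark bitmap: the object's address is converted to a bit index relative to the covered region. A small sketch of such an address-to-bit mapping, assuming one mark bit per heap word (the names and granularity are illustrative, not the CMSBitMap layout):

#include <cstddef>
#include <cstdint>
#include <vector>

// One mark bit per heap word, relative to the start of the covered region.
class MarkBitMap {
  const uintptr_t       _region_start;   // address of the first covered word
  std::vector<uint64_t> _bits;
 public:
  MarkBitMap(uintptr_t start, std::size_t words)
    : _region_start(start), _bits((words + 63) / 64, 0) {}

  std::size_t bit_index(uintptr_t addr) const {
    return (addr - _region_start) / sizeof(void*);  // word offset = bit index
  }
  void mark(uintptr_t addr) {
    std::size_t i = bit_index(addr);
    _bits[i / 64] |= (uint64_t(1) << (i % 64));
  }
  bool is_marked(uintptr_t addr) const {
    std::size_t i = bit_index(addr);
    return (_bits[i / 64] >> (i % 64)) & 1;
  }
};
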
1060 
1061 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1062   FreeChunk* fc = (FreeChunk*)p;
1063   assert(is_in_reserved(p), "Should be in space");
1064   assert(_bt.block_start(p) == p, "Should be a block boundary");
1065   if (!fc->is_free()) {
1066     // Ignore mark word because it may have been used to
1067     // chain together promoted objects (the last one
1068     // would have a null value).
1069     assert(oop(p)->is_oop(true), "Should be an oop");
1070     return true;
1071   }
1072   return false;
1073 }
1074 
1075 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1076 // approximate answer if you don't hold the freelistlock when you call this.
1077 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1078   size_t size = 0;
1079   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1080     debug_only(
1081       // We may be calling here without the lock in which case we
1082       // won't do this modest sanity check.
1083       if (freelistLock()->owned_by_self()) {
1084         size_t total_list_size = 0;
1085         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1086           fc = fc->next()) {
1087           total_list_size += i;
1088         }
1089         assert(total_list_size == i * _indexedFreeList[i].count(),
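
The truncated assert above re-derives the invariant the debug loop walks: list i of the indexed (segregated) free lists holds only chunks of exactly i words, so each list contributes i * count(i) words and the space total is the sum over all indexed lists. A small sketch under that assumption (the names are illustrative):

#include <cstddef>

// Hypothetical segregated free lists: list i holds only chunks of i words.
struct FreeList { std::size_t count; };   // number of chunks on the list

std::size_t total_size_in_indexed_free_lists(const FreeList* lists,
                                             std::size_t index_set_size,
                                             std::size_t index_set_start,
                                             std::size_t index_set_stride) {
  std::size_t total = 0;
  for (std::size_t i = index_set_start; i < index_set_size; i += index_set_stride) {
    // Every chunk on list i is exactly i words, so the per-chunk walk in the
    // debug block above must sum to i * count(i).
    total += i * lists[i].count;
  }
  return total;
}
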


2157   const CompactibleFreeListSpace* _sp;
2158   const MemRegion                 _span;
2159   HeapWord*                       _last_addr;
2160   size_t                          _last_size;
2161   bool                            _last_was_obj;
2162   bool                            _last_was_live;
2163 
2164  public:
2165   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2166     MemRegion span) :  _sp(sp), _span(span),
2167                        _last_addr(NULL), _last_size(0),
2168                        _last_was_obj(false), _last_was_live(false) { }
2169 
2170   virtual size_t do_blk(HeapWord* addr) {
2171     size_t res;
2172     bool   was_obj  = false;
2173     bool   was_live = false;
2174     if (_sp->block_is_obj(addr)) {
2175       was_obj = true;
2176       oop p = oop(addr);
2177       guarantee(p->is_oop(), "Should be an oop");
2178       res = _sp->adjustObjectSize(p->size());
2179       if (_sp->obj_is_alive(addr)) {
2180         was_live = true;
2181         p->verify();
2182       }
2183     } else {
2184       FreeChunk* fc = (FreeChunk*)addr;
2185       res = fc->size();
2186       if (FLSVerifyLists && !fc->cantCoalesce()) {
2187         guarantee(_sp->verify_chunk_in_free_list(fc),
2188                   "Chunk should be on a free list");
2189       }
2190     }
2191     if (res == 0) {
2192       Log(gc, verify) log;
2193       log.error("Livelock: no rank reduction!");
2194       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2195                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2196         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2197         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");


2209 
2210 class VerifyAllOopsClosure: public OopClosure {
2211  private:
2212   const CMSCollector*             _collector;
2213   const CompactibleFreeListSpace* _sp;
2214   const MemRegion                 _span;
2215   const bool                      _past_remark;
2216   const CMSBitMap*                _bit_map;
2217 
2218  protected:
2219   void do_oop(void* p, oop obj) {
2220     if (_span.contains(obj)) { // the interior oop points into CMS heap
2221       if (!_span.contains(p)) { // reference from outside CMS heap
2222         // Should be a valid object; the first disjunct below allows
2223         // us to sidestep an assertion in block_is_obj() that insists
2224         // that p be in _sp. Note that several generations (and spaces)
2225         // are spanned by _span (CMS heap) above.
2226         guarantee(!_sp->is_in_reserved(obj) ||
2227                   _sp->block_is_obj((HeapWord*)obj),
2228                   "Should be an object");
2229         guarantee(obj->is_oop(), "Should be an oop");
2230         obj->verify();
2231         if (_past_remark) {
2232           // Remark has been completed, the object should be marked
2233           _bit_map->isMarked((HeapWord*)obj);
2234         }
2235       } else { // reference within CMS heap
2236         if (_past_remark) {
2237           // Remark has been completed -- so the referent should have
2238           // been marked, if referring object is.
2239           if (_bit_map->isMarked(_collector->block_start(p))) {
2240             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2241           }
2242         }
2243       }
2244     } else if (_sp->is_in_reserved(p)) {
2245       // the reference is from FLS, and points out of FLS
2246       guarantee(obj->is_oop(), "Should be an oop");
2247       obj->verify();
2248     }
2249   }
2250 
2251   template <class T> void do_oop_work(T* p) {
2252     T heap_oop = oopDesc::load_heap_oop(p);
2253     if (!oopDesc::is_null(heap_oop)) {
2254       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2255       do_oop(p, obj);
2256     }
2257   }
2258 
2259  public:
2260   VerifyAllOopsClosure(const CMSCollector* collector,
2261     const CompactibleFreeListSpace* sp, MemRegion span,
2262     bool past_remark, CMSBitMap* bit_map) :
2263     _collector(collector), _sp(sp), _span(span),
2264     _past_remark(past_remark), _bit_map(bit_map) { }
2265 
2266   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
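
The closure funnels both compressed and full-width reference fields through one template: do_oop_work() loads the raw field, skips nulls, decodes it to a full oop, and forwards to the shared do_oop(void*, oop) shown above. A simplified sketch of that decode-then-dispatch shape (the 32-bit narrow encoding below is an assumption for illustration, not HotSpot's actual compressed-oop scheme):

#include <cstdint>

typedef void*    oop;         // full-width object pointer
typedef uint32_t narrowOop;   // compressed reference (illustrative)

static oop decode(narrowOop v) {
  // Illustrative only: treat the narrow value as a shifted offset from base 0.
  return v == 0 ? (oop)0 : (oop)((uintptr_t)v << 3);
}
static oop decode(oop v) { return v; }   // full-width fields need no decoding

struct VerifyClosure {
  void do_oop(void* p, oop obj) { /* shared verification, as in the hunk */ (void)p; (void)obj; }

  // One template serves both field widths; only the decode step differs.
  template <class T> void do_oop_work(T* p) {
    T raw = *p;
    if (raw != T()) {             // skip null references
      do_oop(p, decode(raw));
    }
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
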




 912       size_t res = fc->size();
 913 
 914       // Bugfix for systems with weak memory model (PPC64/IA64). The
 915       // block's free bit was set and we have read the size of the
 916       // block. Acquire and check the free bit again. If the block is
 917       // still free, the read size is correct.
 918       OrderAccess::acquire();
 919 
 920       // If the object is still a free chunk, return the size, else it
 921       // has been allocated so try again.
 922       if (FreeChunk::indicatesFreeChunk(p)) {
 923         assert(res != 0, "Block size should not be 0");
 924         return res;
 925       }
 926     } else {
 927       // Ensure klass read before size.
 928       Klass* k = oop(p)->klass_or_null_acquire();
 929       if (k != NULL) {
 930         assert(k->is_klass(), "Should really be klass oop.");
 931         oop o = (oop)p;
 932         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
 933 
 934         size_t res = o->size_given_klass(k);
 935         res = adjustObjectSize(res);
 936         assert(res != 0, "Block size should not be 0");
 937         return res;
 938       }
 939     }
 940   }
 941 }
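
The only change in this hunk (and in the parallel hunks below) is mechanical: the member-style o->is_oop(...) asserts become static oopDesc::is_oop(o, ...) calls that take the candidate as an explicit argument, so the check no longer has to be invoked through a pointer it has not yet validated. A hedged sketch of why the static shape suits a mere candidate pointer (Thing and thing_is_valid are illustrative, not the oopDesc API):

#include <cassert>
#include <cstddef>

struct Thing {
  int tag;
  // Member-style check: calling it already presumes 'this' points at a valid
  // Thing, which is exactly what the check is trying to establish.
  bool looks_valid() const { return tag == 0x51DECAFE; }
};

// Static-style check: the candidate is an ordinary argument, so a NULL or
// not-yet-validated pointer can be screened before any member access.
static bool thing_is_valid(const Thing* t) {
  return t != NULL && t->tag == 0x51DECAFE;
}

void example(const Thing* candidate) {
  // assert(candidate->looks_valid());  // old shape: undefined if candidate is bad
  assert(thing_is_valid(candidate));    // new shape mirrored by oopDesc::is_oop(o)
}
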
 942 
 943 // TODO: Now that is_parsable is gone, we should combine these two functions.
 944 // A variant of the above that uses the Printezis bits for
 945 // unparsable but allocated objects. This avoids any possible
 946 // stalls waiting for mutators to initialize objects, and is
 947 // thus potentially faster than the variant above. However,
 948 // this variant may return a zero size for a block that is
 949 // under mutation and for which a consistent size cannot be
 950 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 951 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 952                                                      const CMSCollector* c)


 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // free bit of the block was set and we have read the size of
 967       // the block. Acquire and check the free bit again. If the
 968       // block is still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       if (FreeChunk::indicatesFreeChunk(p)) {
 972         assert(res != 0, "Block size should not be 0");
 973         assert(loops == 0, "Should be 0");
 974         return res;
 975       }
 976     } else {
 977       // Ensure klass read before size.
 978       Klass* k = oop(p)->klass_or_null_acquire();
 979       if (k != NULL) {
 980         assert(k->is_klass(), "Should really be klass oop.");
 981         oop o = (oop)p;
 982         assert(oopDesc::is_oop(o), "Should be an oop");
 983 
 984         size_t res = o->size_given_klass(k);
 985         res = adjustObjectSize(res);
 986         assert(res != 0, "Block size should not be 0");
 987         return res;
 988       } else {
 989         // May return 0 if P-bits not present.
 990         return c->block_size_if_printezis_bits(p);
 991       }
 992     }
 993     assert(loops == 0, "Can loop at most once");
 994     DEBUG_ONLY(loops++;)
 995   }
 996 }
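
As the header comment above warns, block_size_no_stall() may return 0 when the block is under mutation and the Printezis bits carry no information, so callers must treat 0 as "size unknown" rather than as a block length to advance by. A small hedged sketch of such a caller (the scanning loop and names are illustrative):

#include <cstddef>

// Hypothetical size probe mirroring the shape of block_size_no_stall():
// returns the block size in words, or 0 if it cannot be determined without
// stalling.
typedef std::size_t (*SizeProbe)(const char* addr);

// Scan [bottom, top) without ever stalling: a 0 result ends this pass early,
// leaving the remainder for a later, more conservative pass.
std::size_t scan_without_stalling(const char* bottom, const char* top,
                                  std::size_t bytes_per_word, SizeProbe probe) {
  std::size_t blocks_seen = 0;
  for (const char* cur = bottom; cur < top; ) {
    std::size_t words = probe(cur);
    if (words == 0) {
      break;   // size unknown right now: stop rather than spin or stall
    }
    ++blocks_seen;
    cur += words * bytes_per_word;
  }
  return blocks_seen;
}
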
 997 
 998 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 999   NOT_PRODUCT(verify_objects_initialized());
1000   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1001   FreeChunk* fc = (FreeChunk*)p;
1002   if (fc->is_free()) {
1003     return fc->size();
1004   } else {
1005     // Ignore mark word because this may be a recently promoted
1006     // object whose mark word is used to chain together grey
1007     // objects (the last one would have a null value).
1008     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1009     return adjustObjectSize(oop(p)->size());
1010   }
1011 }
1012 
1013 // This implementation assumes that the property of "being an object" is
1014 // stable.  But being a free chunk may not be (because of parallel
1015 // promotion.)
1016 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1017   FreeChunk* fc = (FreeChunk*)p;
1018   assert(is_in_reserved(p), "Should be in space");
1019   if (FreeChunk::indicatesFreeChunk(p)) return false;
1020   Klass* k = oop(p)->klass_or_null_acquire();
1021   if (k != NULL) {
1022     // Ignore mark word because it may have been used to
1023     // chain together promoted objects (the last one
1024     // would have a null value).
1025     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1026     return true;
1027   } else {
1028     return false;  // Was not an object at the start of collection.
1029   }
1030 }
1031 
1032 // Check if the object is alive. This fact is checked either by consulting
1033 // the main marking bitmap in the sweeping phase or, if it's a permanent
1034 // generation and we're not in the sweeping phase, by checking the
1035 // perm_gen_verify_bit_map where we store the "deadness" information if
1036 // we did not sweep the perm gen in the most recent previous GC cycle.
1037 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1038   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1039          "Else races are possible");
1040   assert(block_is_obj(p), "The address should point to an object");
1041 
1042   // If we're sweeping, we use object liveness information from the main bit map
1043   // for both perm gen and old gen.
1044   // We don't need to lock the bitmap (live_map or dead_map below), because
1045   // EITHER we are in the middle of the sweeping phase, and the


1049   // NOTE: This method is also used by jmap where, if class unloading is
1050   // off, the results can return "false" for legitimate perm objects,
1051   // when we are not in the midst of a sweeping phase, which can result
1052   // in jmap not reporting certain perm gen objects. This will be moot
1053   // if/when the perm gen goes away in the future.
1054   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1055     CMSBitMap* live_map = _collector->markBitMap();
1056     return live_map->par_isMarked((HeapWord*) p);
1057   }
1058   return true;
1059 }
1060 
1061 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1062   FreeChunk* fc = (FreeChunk*)p;
1063   assert(is_in_reserved(p), "Should be in space");
1064   assert(_bt.block_start(p) == p, "Should be a block boundary");
1065   if (!fc->is_free()) {
1066     // Ignore mark word because it may have been used to
1067     // chain together promoted objects (the last one
1068     // would have a null value).
1069     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1070     return true;
1071   }
1072   return false;
1073 }
1074 
1075 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1076 // approximate answer if you don't hold the freelistlock when you call this.
1077 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1078   size_t size = 0;
1079   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1080     debug_only(
1081       // We may be calling here without the lock in which case we
1082       // won't do this modest sanity check.
1083       if (freelistLock()->owned_by_self()) {
1084         size_t total_list_size = 0;
1085         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1086           fc = fc->next()) {
1087           total_list_size += i;
1088         }
1089         assert(total_list_size == i * _indexedFreeList[i].count(),


2157   const CompactibleFreeListSpace* _sp;
2158   const MemRegion                 _span;
2159   HeapWord*                       _last_addr;
2160   size_t                          _last_size;
2161   bool                            _last_was_obj;
2162   bool                            _last_was_live;
2163 
2164  public:
2165   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2166     MemRegion span) :  _sp(sp), _span(span),
2167                        _last_addr(NULL), _last_size(0),
2168                        _last_was_obj(false), _last_was_live(false) { }
2169 
2170   virtual size_t do_blk(HeapWord* addr) {
2171     size_t res;
2172     bool   was_obj  = false;
2173     bool   was_live = false;
2174     if (_sp->block_is_obj(addr)) {
2175       was_obj = true;
2176       oop p = oop(addr);
2177       guarantee(oopDesc::is_oop(p), "Should be an oop");
2178       res = _sp->adjustObjectSize(p->size());
2179       if (_sp->obj_is_alive(addr)) {
2180         was_live = true;
2181         p->verify();
2182       }
2183     } else {
2184       FreeChunk* fc = (FreeChunk*)addr;
2185       res = fc->size();
2186       if (FLSVerifyLists && !fc->cantCoalesce()) {
2187         guarantee(_sp->verify_chunk_in_free_list(fc),
2188                   "Chunk should be on a free list");
2189       }
2190     }
2191     if (res == 0) {
2192       Log(gc, verify) log;
2193       log.error("Livelock: no rank reduction!");
2194       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2195                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2196         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2197         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");


2209 
2210 class VerifyAllOopsClosure: public OopClosure {
2211  private:
2212   const CMSCollector*             _collector;
2213   const CompactibleFreeListSpace* _sp;
2214   const MemRegion                 _span;
2215   const bool                      _past_remark;
2216   const CMSBitMap*                _bit_map;
2217 
2218  protected:
2219   void do_oop(void* p, oop obj) {
2220     if (_span.contains(obj)) { // the interior oop points into CMS heap
2221       if (!_span.contains(p)) { // reference from outside CMS heap
2222         // Should be a valid object; the first disjunct below allows
2223         // us to sidestep an assertion in block_is_obj() that insists
2224         // that p be in _sp. Note that several generations (and spaces)
2225         // are spanned by _span (CMS heap) above.
2226         guarantee(!_sp->is_in_reserved(obj) ||
2227                   _sp->block_is_obj((HeapWord*)obj),
2228                   "Should be an object");
2229         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2230         obj->verify();
2231         if (_past_remark) {
2232           // Remark has been completed, the object should be marked
2233           _bit_map->isMarked((HeapWord*)obj);
2234         }
2235       } else { // reference within CMS heap
2236         if (_past_remark) {
2237           // Remark has been completed -- so the referent should have
2238           // been marked, if referring object is.
2239           if (_bit_map->isMarked(_collector->block_start(p))) {
2240             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2241           }
2242         }
2243       }
2244     } else if (_sp->is_in_reserved(p)) {
2245       // the reference is from FLS, and points out of FLS
2246       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2247       obj->verify();
2248     }
2249   }
2250 
2251   template <class T> void do_oop_work(T* p) {
2252     T heap_oop = oopDesc::load_heap_oop(p);
2253     if (!oopDesc::is_null(heap_oop)) {
2254       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2255       do_oop(p, obj);
2256     }
2257   }
2258 
2259  public:
2260   VerifyAllOopsClosure(const CMSCollector* collector,
2261     const CompactibleFreeListSpace* sp, MemRegion span,
2262     bool past_remark, CMSBitMap* bit_map) :
2263     _collector(collector), _sp(sp), _span(span),
2264     _past_remark(past_remark), _bit_map(bit_map) { }
2265 
2266   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }

