
src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp

rev 53367 : 8248851: CMS: Missing memory fences between free chunk check and klass read
Reviewed-by: aph, kbarrett, dholmes
Contributed-by: wangshuai94@huawei.com
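
The fix addresses the following race: while a block is a free chunk, its header words are reused for free-list bookkeeping, so on a weakly ordered machine (e.g. PPC64 or AArch64) a klass load hoisted above the free-chunk check can read garbage from a chunk that was still free when the load executed. If the chunk is then allocated before the free-bit load (later in execution order, though earlier in program order) completes, the reader pairs "not a free chunk" with a bogus klass. The patch closes the window with an OrderAccess::loadload() barrier between the two loads. The sketch below is a minimal standalone C++ model of the ordering the patch enforces; it is NOT HotSpot code: std::atomic stands in for OrderAccess, and the names (BlockHeader, free_bit, klass_word, allocate) are illustrative. The original revision of the file is shown first, followed by the patched revision.

// Standalone sketch of the ordering the patch enforces; NOT HotSpot code.
// std::atomic stands in for OrderAccess, and all names are illustrative.
#include <atomic>
#include <cstdio>

struct Klass { const char* name; };

struct BlockHeader {
  std::atomic<bool>   free_bit;    // set while the block is a free chunk
  std::atomic<Klass*> klass_word;  // holds a real Klass* only once allocated
};

// Reader side, mirroring the patched block_is_obj():
bool block_is_obj(const BlockHeader* p) {
  if (p->free_bit.load(std::memory_order_relaxed)) {
    return false;                       // still a free chunk
  }
  // OrderAccess::loadload() analogue: forbids the klass load below from
  // being satisfied before the free-bit load above on weak memory models.
  std::atomic_thread_fence(std::memory_order_acquire);
  // klass_or_null_acquire() analogue:
  Klass* k = p->klass_word.load(std::memory_order_acquire);
  return k != nullptr;  // NULL: allocated but klass not yet published
}

// Writer side: allocation publishes the klass before clearing the free
// bit, so a reader that sees the bit cleared also sees a valid klass.
void allocate(BlockHeader* p, Klass* k) {
  p->klass_word.store(k, std::memory_order_release);
  p->free_bit.store(false, std::memory_order_release);
}

int main() {
  BlockHeader b;
  b.klass_word.store(nullptr, std::memory_order_relaxed);
  b.free_bit.store(true, std::memory_order_relaxed);

  static Klass object_klass = { "java/lang/Object" };
  std::printf("free chunk: is_obj=%d\n", block_is_obj(&b));  // 0
  allocate(&b, &object_klass);
  std::printf("allocated:  is_obj=%d\n", block_is_obj(&b));  // 1
  return 0;
}
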
   1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1161   // the value read the first time in a register.
1162   while (true) {
1163     // We must do this until we get a consistent view of the object.
1164     if (FreeChunk::indicatesFreeChunk(p)) {
1165       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1166       size_t res = fc->size();
1167 
1168       // Bugfix for systems with weak memory model (PPC64/IA64). The
1169       // block's free bit was set and we have read the size of the
1170       // block. Acquire and check the free bit again. If the block is
1171       // still free, the read size is correct.
1172       OrderAccess::acquire();
1173 
1174       // If the object is still a free chunk, return the size, else it
1175       // has been allocated so try again.
1176       if (FreeChunk::indicatesFreeChunk(p)) {
1177         assert(res != 0, "Block size should not be 0");
1178         return res;
1179       }
1180     } else {

1181       // Ensure klass read before size.
1182       Klass* k = oop(p)->klass_or_null_acquire();
1183       if (k != NULL) {
1184         assert(k->is_klass(), "Should really be klass oop.");
1185         oop o = (oop)p;
1186         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1187 
1188         size_t res = o->size_given_klass(k);
1189         res = adjustObjectSize(res);
1190         assert(res != 0, "Block size should not be 0");
1191         return res;
1192       }
1193     }
1194   }
1195 }
1196 
1197 // TODO: Now that is_parsable is gone, we should combine these two functions.
1198 // A variant of the above that uses the Printezis bits for
1199 // unparsable but allocated objects. This avoids any possible
1200 // stalls waiting for mutators to initialize objects, and is


1211   // the value read the first time in a register.
1212   DEBUG_ONLY(uint loops = 0;)
1213   while (true) {
1214     // We must do this until we get a consistent view of the object.
1215     if (FreeChunk::indicatesFreeChunk(p)) {
1216       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1217       size_t res = fc->size();
1218 
1219       // Bugfix for systems with weak memory model (PPC64/IA64). The
1220       // free bit of the block was set and we have read the size of
1221       // the block. Acquire and check the free bit again. If the
1222       // block is still free, the read size is correct.
1223       OrderAccess::acquire();
1224 
1225       if (FreeChunk::indicatesFreeChunk(p)) {
1226         assert(res != 0, "Block size should not be 0");
1227         assert(loops == 0, "Should be 0");
1228         return res;
1229       }
1230     } else {

1231       // Ensure klass read before size.
1232       Klass* k = oop(p)->klass_or_null_acquire();
1233       if (k != NULL) {
1234         assert(k->is_klass(), "Should really be klass oop.");
1235         oop o = (oop)p;
1236         assert(oopDesc::is_oop(o), "Should be an oop");
1237 
1238         size_t res = o->size_given_klass(k);
1239         res = adjustObjectSize(res);
1240         assert(res != 0, "Block size should not be 0");
1241         return res;
1242       } else {
1243         // May return 0 if P-bits not present.
1244         return c->block_size_if_printezis_bits(p);
1245       }
1246     }
1247     assert(loops == 0, "Can loop at most once");
1248     DEBUG_ONLY(loops++;)
1249   }
1250 }


1254   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1255   FreeChunk* fc = (FreeChunk*)p;
1256   if (fc->is_free()) {
1257     return fc->size();
1258   } else {
1259     // Ignore mark word because this may be a recently promoted
1260     // object whose mark word is used to chain together grey
1261     // objects (the last one would have a null value).
1262     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1263     return adjustObjectSize(oop(p)->size());
1264   }
1265 }
1266 
1267 // This implementation assumes that the property of "being an object" is
1268 // stable.  But being a free chunk may not be (because of parallel
1269 // promotion.)
1270 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1271   FreeChunk* fc = (FreeChunk*)p;
1272   assert(is_in_reserved(p), "Should be in space");
1273   if (FreeChunk::indicatesFreeChunk(p)) return false;
1274   Klass* k = oop(p)->klass_or_null_acquire();
1275   if (k != NULL) {
1276     // Ignore mark word because it may have been used to
1277     // chain together promoted objects (the last one
1278     // would have a null value).
1279     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1280     return true;
1281   } else {
1282     return false;  // Was not an object at the start of collection.
1283   }
1284 }
1285 
1286 // Check if the object is alive. This fact is checked either by consulting
1287 // the main marking bitmap in the sweeping phase or, if it's a permanent
1288 // generation and we're not in the sweeping phase, by checking the
1289 // perm_gen_verify_bit_map where we store the "deadness" information if
1290 // we did not sweep the perm gen in the most recent previous GC cycle.
1291 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1292   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1293          "Else races are possible");

The patched revision (rev 53367) follows; it adds an OrderAccess::loadload() barrier between the free chunk check and the klass read at each of the three sites above.

   1 /*
   2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1161   // the value read the first time in a register.
1162   while (true) {
1163     // We must do this until we get a consistent view of the object.
1164     if (FreeChunk::indicatesFreeChunk(p)) {
1165       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1166       size_t res = fc->size();
1167 
1168       // Bugfix for systems with weak memory model (PPC64/IA64). The
1169       // block's free bit was set and we have read the size of the
1170       // block. Acquire and check the free bit again. If the block is
1171       // still free, the read size is correct.
1172       OrderAccess::acquire();
1173 
1174       // If the object is still a free chunk, return the size, else it
1175       // has been allocated so try again.
1176       if (FreeChunk::indicatesFreeChunk(p)) {
1177         assert(res != 0, "Block size should not be 0");
1178         return res;
1179       }
1180     } else {
1181       // The barrier is required to prevent reordering of the free chunk check
1182       // and the klass read.
1183       OrderAccess::loadload();
1184 
1185       // Ensure klass read before size.
1186       Klass* k = oop(p)->klass_or_null_acquire();
1187       if (k != NULL) {
1188         assert(k->is_klass(), "Should really be klass oop.");
1189         oop o = (oop)p;
1190         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1191 
1192         size_t res = o->size_given_klass(k);
1193         res = adjustObjectSize(res);
1194         assert(res != 0, "Block size should not be 0");
1195         return res;
1196       }
1197     }
1198   }
1199 }
1200 
1201 // TODO: Now that is_parsable is gone, we should combine these two functions.
1202 // A variant of the above that uses the Printezis bits for
1203 // unparsable but allocated objects. This avoids any possible
1204 // stalls waiting for mutators to initialize objects, and is


1215   // the value read the first time in a register.
1216   DEBUG_ONLY(uint loops = 0;)
1217   while (true) {
1218     // We must do this until we get a consistent view of the object.
1219     if (FreeChunk::indicatesFreeChunk(p)) {
1220       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1221       size_t res = fc->size();
1222 
1223       // Bugfix for systems with weak memory model (PPC64/IA64). The
1224       // free bit of the block was set and we have read the size of
1225       // the block. Acquire and check the free bit again. If the
1226       // block is still free, the read size is correct.
1227       OrderAccess::acquire();
1228 
1229       if (FreeChunk::indicatesFreeChunk(p)) {
1230         assert(res != 0, "Block size should not be 0");
1231         assert(loops == 0, "Should be 0");
1232         return res;
1233       }
1234     } else {
1235       // The barrier is required to prevent reordering of the free chunk check
1236       // and the klass read.
1237       OrderAccess::loadload();
1238 
1239       // Ensure klass read before size.
1240       Klass* k = oop(p)->klass_or_null_acquire();
1241       if (k != NULL) {
1242         assert(k->is_klass(), "Should really be klass oop.");
1243         oop o = (oop)p;
1244         assert(oopDesc::is_oop(o), "Should be an oop");
1245 
1246         size_t res = o->size_given_klass(k);
1247         res = adjustObjectSize(res);
1248         assert(res != 0, "Block size should not be 0");
1249         return res;
1250       } else {
1251         // May return 0 if P-bits not present.
1252         return c->block_size_if_printezis_bits(p);
1253       }
1254     }
1255     assert(loops == 0, "Can loop at most once");
1256     DEBUG_ONLY(loops++;)
1257   }
1258 }


1262   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1263   FreeChunk* fc = (FreeChunk*)p;
1264   if (fc->is_free()) {
1265     return fc->size();
1266   } else {
1267     // Ignore mark word because this may be a recently promoted
1268     // object whose mark word is used to chain together grey
1269     // objects (the last one would have a null value).
1270     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1271     return adjustObjectSize(oop(p)->size());
1272   }
1273 }
1274 
1275 // This implementation assumes that the property of "being an object" is
1276 // stable.  But being a free chunk may not be (because of parallel
1277 // promotion.)
1278 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1279   FreeChunk* fc = (FreeChunk*)p;
1280   assert(is_in_reserved(p), "Should be in space");
1281   if (FreeChunk::indicatesFreeChunk(p)) return false;
1282 
1283   // The barrier is required to prevent reordering of the free chunk check
1284   // and the klass read.
1285   OrderAccess::loadload();
1286 
1287   Klass* k = oop(p)->klass_or_null_acquire();
1288   if (k != NULL) {
1289     // Ignore mark word because it may have been used to
1290     // chain together promoted objects (the last one
1291     // would have a null value).
1292     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1293     return true;
1294   } else {
1295     return false;  // Was not an object at the start of collection.
1296   }
1297 }
1298 
1299 // Check if the object is alive. This fact is checked either by consulting
1300 // the main marking bitmap in the sweeping phase or, if it's a permanent
1301 // generation and we're not in the sweeping phase, by checking the
1302 // perm_gen_verify_bit_map where we store the "deadness" information if
1303 // we did not sweep the perm gen in the most recent previous GC cycle.
1304 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1305   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1306          "Else races are possible");
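
The OrderAccess::acquire() already present in block_size() and block_size_no_stall() is a different, pre-existing fence: the size of a free chunk is read first, then the free bit is re-checked under acquire ordering, and the size is trusted only if the bit is still set; otherwise the loop retries. Below is a standalone model of that retry loop, under the same caveats as the sketch above (std::atomic in place of OrderAccess, illustrative names; NOT HotSpot code).

// Standalone model of the pre-existing size/recheck loop; NOT HotSpot code.
#include <atomic>
#include <cstddef>
#include <cstdio>

struct Chunk {
  std::atomic<bool>   free_bit;
  std::atomic<size_t> size;   // valid only while free_bit is set
};

size_t free_chunk_size(const Chunk* p) {
  for (;;) {
    if (!p->free_bit.load(std::memory_order_relaxed)) {
      // Not a free chunk: the real code takes the klass path here
      // (see the block_is_obj sketch above); elided in this model.
      return 0;
    }
    size_t res = p->size.load(std::memory_order_relaxed);
    // OrderAccess::acquire() analogue: the free-bit re-read below cannot
    // be reordered before the size read above.
    std::atomic_thread_fence(std::memory_order_acquire);
    if (p->free_bit.load(std::memory_order_relaxed)) {
      return res;  // bit still set, so res was read from a free chunk
    }
    // The chunk was allocated between the two checks; retry.
  }
}

int main() {
  Chunk c;
  c.size.store(64, std::memory_order_relaxed);
  c.free_bit.store(true, std::memory_order_relaxed);
  std::printf("size=%zu\n", free_chunk_size(&c));  // 64
  return 0;
}
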

