
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

rev 9232 : 8248851: CMS: Missing memory fences between free chunk check and klass read
Reviewed-by: aph
Contributed-by: wangshuai94@huawei.com
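
For orientation: the hazard this patch closes is a plain load-load reordering. A reader first checks whether a block is a free chunk and, if it is not, reads the block's klass word; on weakly ordered hardware (PPC64, IA64, and likewise AArch64) the second load can be satisfied before the first, so the klass word may be sampled while the block is still a free chunk. A minimal, self-contained C++11 sketch of the pattern and of the acquire-fence fix follows; the Block type and every name in it are hypothetical stand-ins, not HotSpot code.

#include <atomic>

// Hypothetical stand-in for a CMS block header (not HotSpot code): a free
// bit maintained by the sweeper, and a klass word that becomes meaningful
// once the block holds an allocated object.
struct Block {
  std::atomic<bool>  is_free;  // plays the role of FreeChunk::indicatesFreeChunk(p)
  std::atomic<void*> klass;    // plays the role of oopDesc::klass_or_null()
};

// Reader side of the race. Without the fence, a weakly ordered CPU may hoist
// the klass load above the free-bit load: the code then sees "not free" yet
// uses a klass word that was read while the block was still a free chunk.
void* wait_for_klass(Block* b) {
  while (true) {
    if (b->is_free.load(std::memory_order_relaxed)) {
      continue;  // still a free chunk; retry until it is allocated
    }
    // The fix in this patch: keep the free-chunk check ordered before the
    // klass read.
    std::atomic_thread_fence(std::memory_order_acquire);
    void* k = b->klass.load(std::memory_order_relaxed);
    if (k != nullptr) {
      return k;  // the block is an initialized object; k is safe to use
    }
    // klass not yet published; loop and re-examine the block.
  }
}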


 977   // the value read the first time in a register.
 978   while (true) {
 979     // We must do this until we get a consistent view of the object.
 980     if (FreeChunk::indicatesFreeChunk(p)) {
 981       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 982       size_t res = fc->size();
 983 
 984       // Bugfix for systems with weak memory model (PPC64/IA64). The
 985       // block's free bit was set and we have read the size of the
 986       // block. Acquire and check the free bit again. If the block is
 987       // still free, the read size is correct.
 988       OrderAccess::acquire();
 989 
 990       // If the object is still a free chunk, return the size, else it
 991       // has been allocated so try again.
 992       if (FreeChunk::indicatesFreeChunk(p)) {
 993         assert(res != 0, "Block size should not be 0");
 994         return res;
 995       }
 996     } else {
 997       // must read from what 'p' points to in each loop.
 998       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 999       if (k != NULL) {
1000         assert(k->is_klass(), "Should really be klass oop.");
1001         oop o = (oop)p;
1002         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1003 
1004         // Bugfix for systems with weak memory model (PPC64/IA64).
1005         // The object o may be an array. Acquire to make sure that the array
1006         // size (third word) is consistent.
1007         OrderAccess::acquire();
1008 
1009         size_t res = o->size_given_klass(k);
1010         res = adjustObjectSize(res);
1011         assert(res != 0, "Block size should not be 0");
1012         return res;
1013       }
1014     }
1015   }
1016 }
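
The free-chunk branch above was already fenced by an earlier fix: it reads the chunk size, issues an acquire, and re-tests the free bit, accepting the size only if the bit is still set. A hedged sketch of that validate-after-read step, again with hypothetical names rather than the HotSpot types (the patched versions of these functions, with the additional acquire before the klass read, appear further down the page):

#include <atomic>
#include <cstddef>

// Validate-after-read, as in the free-chunk branch above (hypothetical
// names): read the size, fence, then re-test the free bit. If the bit is
// still set after the acquire, the size was read while the block remained a
// free chunk, so the value is coherent; otherwise the caller must retry.
bool try_free_chunk_size(const std::atomic<bool>& is_free,
                         const std::atomic<std::size_t>& size,
                         std::size_t* out) {
  if (!is_free.load(std::memory_order_relaxed)) {
    return false;                          // not a free chunk at all
  }
  std::size_t res = size.load(std::memory_order_relaxed);
  // Keep the size load ordered before the re-check of the free bit.
  std::atomic_thread_fence(std::memory_order_acquire);
  if (is_free.load(std::memory_order_relaxed)) {
    *out = res;                            // still free: res is trustworthy
    return true;
  }
  return false;                            // concurrently allocated; retry
}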


1032   // the value read the first time in a register.
1033   DEBUG_ONLY(uint loops = 0;)
1034   while (true) {
1035     // We must do this until we get a consistent view of the object.
1036     if (FreeChunk::indicatesFreeChunk(p)) {
1037       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1038       size_t res = fc->size();
1039 
1040       // Bugfix for systems with weak memory model (PPC64/IA64). The
1041       // free bit of the block was set and we have read the size of
1042       // the block. Acquire and check the free bit again. If the
1043       // block is still free, the read size is correct.
1044       OrderAccess::acquire();
1045 
1046       if (FreeChunk::indicatesFreeChunk(p)) {
1047         assert(res != 0, "Block size should not be 0");
1048         assert(loops == 0, "Should be 0");
1049         return res;
1050       }
1051     } else {
1052       // must read from what 'p' points to in each loop.
1053       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1054       // We trust the size of any object that has a non-NULL
1055       // klass and (for those in the perm gen) is parsable
1056       // -- irrespective of its conc_safe-ty.
1057       if (k != NULL) {
1058         assert(k->is_klass(), "Should really be klass oop.");
1059         oop o = (oop)p;
1060         assert(o->is_oop(), "Should be an oop");
1061 
1062         // Bugfix for systems with weak memory model (PPC64/IA64).
1063         // The object o may be an array. Acquire to make sure that the array
1064         // size (third word) is consistent.
1065         OrderAccess::acquire();
1066 
1067         size_t res = o->size_given_klass(k);
1068         res = adjustObjectSize(res);
1069         assert(res != 0, "Block size should not be 0");
1070         return res;
1071       } else {


1094 }
1095 
1096 // This implementation assumes that the property of "being an object" is
1097 // stable.  But being a free chunk may not be (because of parallel
1098 // promotion.)
1099 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1100   FreeChunk* fc = (FreeChunk*)p;
1101   assert(is_in_reserved(p), "Should be in space");
1102   // When doing a mark-sweep-compact of the CMS generation, this
1103   // assertion may fail because prepare_for_compaction() uses
1104   // space that is garbage to maintain information on ranges of
1105   // live objects so that these live ranges can be moved as a whole.
1106   // Comment out this assertion until that problem can be solved
1107   // (i.e., that the block start calculation may look at objects
1108   // at address below "p" in finding the object that contains "p"
1109   // and those objects (if garbage) may have been modified to hold
1110   // live range information.
1111   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1112   //        "Should be a block boundary");
1113   if (FreeChunk::indicatesFreeChunk(p)) return false;
1114   Klass* k = oop(p)->klass_or_null();
1115   if (k != NULL) {
1116     // Ignore mark word because it may have been used to
1117     // chain together promoted objects (the last one
1118     // would have a null value).
1119     assert(oop(p)->is_oop(true), "Should be an oop");
1120     return true;
1121   } else {
1122     return false;  // Was not an object at the start of collection.
1123   }
1124 }
1125 
1126 // Check if the object is alive. This fact is checked either by consulting
1127 // the main marking bitmap in the sweeping phase or, if it's a permanent
1128 // generation and we're not in the sweeping phase, by checking the
1129 // perm_gen_verify_bit_map where we store the "deadness" information if
1130 // we did not sweep the perm gen in the most recent previous GC cycle.
1131 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1132   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1133          "Else races are possible");


 977   // the value read the first time in a register.
 978   while (true) {
 979     // We must do this until we get a consistent view of the object.
 980     if (FreeChunk::indicatesFreeChunk(p)) {
 981       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 982       size_t res = fc->size();
 983 
 984       // Bugfix for systems with weak memory model (PPC64/IA64). The
 985       // block's free bit was set and we have read the size of the
 986       // block. Acquire and check the free bit again. If the block is
 987       // still free, the read size is correct.
 988       OrderAccess::acquire();
 989 
 990       // If the object is still a free chunk, return the size, else it
 991       // has been allocated so try again.
 992       if (FreeChunk::indicatesFreeChunk(p)) {
 993         assert(res != 0, "Block size should not be 0");
 994         return res;
 995       }
 996     } else {
 997       // Acquire to make sure that the klass read happens after the free
 998       // chunk check.
 999       OrderAccess::acquire();
1000 
1001       // must read from what 'p' points to in each loop.
1002       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1003       if (k != NULL) {
1004         assert(k->is_klass(), "Should really be klass oop.");
1005         oop o = (oop)p;
1006         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1007 
1008         // Bugfix for systems with weak memory model (PPC64/IA64).
1009         // The object o may be an array. Acquire to make sure that the array
1010         // size (third word) is consistent.
1011         OrderAccess::acquire();
1012 
1013         size_t res = o->size_given_klass(k);
1014         res = adjustObjectSize(res);
1015         assert(res != 0, "Block size should not be 0");
1016         return res;
1017       }
1018     }
1019   }
1020 }
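
Acquire on the readers only establishes ordering if the writing side publishes with matching release semantics; the pairing below is an assumption of this sketch about the allocation/sweep protocol, not a quote of the HotSpot code. The shape of the writer side, reusing the hypothetical Block from the first sketch:

#include <atomic>

// Same hypothetical Block as in the first sketch (not HotSpot code).
struct Block {
  std::atomic<bool>  is_free;
  std::atomic<void*> klass;
};

// Writer-side counterpart (a sketch under the assumption of a simple
// release/acquire protocol): initialize the object body first, then
// release-publish the klass word, then clear the free bit. The release
// stores pair with the readers' acquire fences in the patched functions.
void publish_object(Block* b, void* k) {
  // ... object fields are initialized here, before publication ...
  b->klass.store(k, std::memory_order_release);       // publish the klass word
  b->is_free.store(false, std::memory_order_release); // block is now an object
}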


1036   // the value read the first time in a register.
1037   DEBUG_ONLY(uint loops = 0;)
1038   while (true) {
1039     // We must do this until we get a consistent view of the object.
1040     if (FreeChunk::indicatesFreeChunk(p)) {
1041       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1042       size_t res = fc->size();
1043 
1044       // Bugfix for systems with weak memory model (PPC64/IA64). The
1045       // free bit of the block was set and we have read the size of
1046       // the block. Acquire and check the free bit again. If the
1047       // block is still free, the read size is correct.
1048       OrderAccess::acquire();
1049 
1050       if (FreeChunk::indicatesFreeChunk(p)) {
1051         assert(res != 0, "Block size should not be 0");
1052         assert(loops == 0, "Should be 0");
1053         return res;
1054       }
1055     } else {
1056       // Acquire to make sure that the klass read happens after the free
1057       // chunk check.
1058       OrderAccess::acquire();
1059 
1060       // must read from what 'p' points to in each loop.
1061       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1062       // We trust the size of any object that has a non-NULL
1063       // klass and (for those in the perm gen) is parsable
1064       // -- irrespective of its conc_safe-ty.
1065       if (k != NULL) {
1066         assert(k->is_klass(), "Should really be klass oop.");
1067         oop o = (oop)p;
1068         assert(o->is_oop(), "Should be an oop");
1069 
1070         // Bugfix for systems with weak memory model (PPC64/IA64).
1071         // The object o may be an array. Acquire to make sure that the array
1072         // size (third word) is consistent.
1073         OrderAccess::acquire();
1074 
1075         size_t res = o->size_given_klass(k);
1076         res = adjustObjectSize(res);
1077         assert(res != 0, "Block size should not be 0");
1078         return res;
1079       } else {


1102 }
1103 
1104 // This implementation assumes that the property of "being an object" is
1105 // stable.  But being a free chunk may not be (because of parallel
1106 // promotion.)
1107 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1108   FreeChunk* fc = (FreeChunk*)p;
1109   assert(is_in_reserved(p), "Should be in space");
1110   // When doing a mark-sweep-compact of the CMS generation, this
1111   // assertion may fail because prepare_for_compaction() uses
1112   // space that is garbage to maintain information on ranges of
1113   // live objects so that these live ranges can be moved as a whole.
1114   // Comment out this assertion until that problem can be solved
1115   // (i.e., that the block start calculation may look at objects
1116   // at address below "p" in finding the object that contains "p"
1117   // and those objects (if garbage) may have been modified to hold
1118   // live range information.
1119   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1120   //        "Should be a block boundary");
1121   if (FreeChunk::indicatesFreeChunk(p)) return false;
1122 
1123   // Acquire to make sure that the klass read happens after the free
1124   // chunk check.
1125   OrderAccess::acquire();
1126 
1127   Klass* k = oop(p)->klass_or_null();
1128   if (k != NULL) {
1129     // Ignore mark word because it may have been used to
1130     // chain together promoted objects (the last one
1131     // would have a null value).
1132     assert(oop(p)->is_oop(true), "Should be an oop");
1133     return true;
1134   } else {
1135     return false;  // Was not an object at the start of collection.
1136   }
1137 }
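
block_is_obj() needs no retry loop: as its comment says, "being an object" is stable here, so a single fenced check pair suffices; only the ordering of the two loads matters. A sketch of that shape, with the same hypothetical names as above:

#include <atomic>

// Single-shot variant mirroring block_is_obj() (hypothetical names): no
// retry loop, because "being an object" is stable during this phase.
bool looks_like_object(const std::atomic<bool>& is_free,
                       const std::atomic<void*>& klass) {
  if (is_free.load(std::memory_order_relaxed)) {
    return false;                          // a free chunk is not an object
  }
  // Order the free-chunk check before the klass read, as in the patch.
  std::atomic_thread_fence(std::memory_order_acquire);
  return klass.load(std::memory_order_relaxed) != nullptr;
}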
1138 
1139 // Check if the object is alive. This fact is checked either by consulting
1140 // the main marking bitmap in the sweeping phase or, if it's a permanent
1141 // generation and we're not in the sweeping phase, by checking the
1142 // perm_gen_verify_bit_map where we store the "deadness" information if
1143 // we did not sweep the perm gen in the most recent previous GC cycle.
1144 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1145   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1146          "Else races are possible");

