
src/share/vm/oops/objArrayKlass.cpp

rev 12854 : [mq]: gcinterface.patch
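
The same hunk is shown twice below: first as it reads before the patch, then as patched. The surrounding context is identical in both listings; the one change visible in this hunk is how the BarrierSet is obtained, which now goes through the GC interface instead of Universe:

    // before the patch
    BarrierSet* bs = Universe::heap()->barrier_set();
    // after the patch
    BarrierSet* bs = GC::gc()->heap()->barrier_set();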


 204       }
 205     } else {
 206       // Since this array dimension has zero length, nothing will be
 207       // allocated; however, the lower dimension values must still be
 208       // checked for illegal values.
 209       for (int i = 0; i < rank - 1; ++i) {
 210         sizes += 1;
 211         if (*sizes < 0) {
 212           THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
 213         }
 214       }
 215     }
 216   }
 217   return h_array();
 218 }
 219 
 220 // Either oop or narrowOop depending on UseCompressedOops.
 221 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
 222                                arrayOop d, T* dst, int length, TRAPS) {
 223 
 224   BarrierSet* bs = Universe::heap()->barrier_set();
 225     // For performance reasons, we assume that the write barrier we
 226   // are using has optimized modes for arrays of references.  At least one
 227   // of the asserts below will fail if this is not the case.
 228   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
 229   assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
 230 
 231   if (s == d) {
 232     // Since source and destination are equal, we do not need conversion checks.
 233     assert(length > 0, "sanity check");
 234     bs->write_ref_array_pre(dst, length);
 235     Copy::conjoint_oops_atomic(src, dst, length);
 236   } else {
 237     // We have to make sure all elements conform to the destination array
 238     Klass* bound = ObjArrayKlass::cast(d->klass())->element_klass();
 239     Klass* stype = ObjArrayKlass::cast(s->klass())->element_klass();
 240     if (stype == bound || stype->is_subtype_of(bound)) {
 241       // elements are guaranteed to be subtypes, so no check necessary
 242       bs->write_ref_array_pre(dst, length);
 243       Copy::conjoint_oops_atomic(src, dst, length);
 244     } else {




 204       }
 205     } else {
 206       // Since this array dimension has zero length, nothing will be
 207       // allocated; however, the lower dimension values must still be
 208       // checked for illegal values.
 209       for (int i = 0; i < rank - 1; ++i) {
 210         sizes += 1;
 211         if (*sizes < 0) {
 212           THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
 213         }
 214       }
 215     }
 216   }
 217   return h_array();
 218 }
 219 
 220 // Either oop or narrowOop depending on UseCompressedOops.
 221 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
 222                                arrayOop d, T* dst, int length, TRAPS) {
 223 
 224   BarrierSet* bs = GC::gc()->heap()->barrier_set();
 225     // For performance reasons, we assume that the write barrier we
 226   // are using has optimized modes for arrays of references.  At least one
 227   // of the asserts below will fail if this is not the case.
 228   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
 229   assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
 230 
 231   if (s == d) {
 232     // Since source and destination are equal, we do not need conversion checks.
 233     assert(length > 0, "sanity check");
 234     bs->write_ref_array_pre(dst, length);
 235     Copy::conjoint_oops_atomic(src, dst, length);
 236   } else {
 237     // We have to make sure all elements conform to the destination array
 238     Klass* bound = ObjArrayKlass::cast(d->klass())->element_klass();
 239     Klass* stype = ObjArrayKlass::cast(s->klass())->element_klass();
 240     if (stype == bound || stype->is_subtype_of(bound)) {
 241       // elements are guaranteed to be subtypes, so no check necessary
 242       bs->write_ref_array_pre(dst, length);
 243       Copy::conjoint_oops_atomic(src, dst, length);
 244     } else {
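
On the zero-length-dimension loop near the top of the hunk: even when an outer dimension of a multi-dimensional allocation is zero, so none of the lower-dimension arrays will ever be created, the remaining requested sizes still have to be validated, because Java requires a NegativeArraySizeException for any negative dimension (for example, new Object[0][-1] must throw). A minimal standalone sketch of that rule, with a hypothetical helper name, not the VM code:

    #include <stdexcept>

    // 'sizes' holds the requested lengths of the rank-1 dimensions below a
    // zero-length outer dimension; nothing is allocated for them, but
    // negative values must still be rejected.
    static void check_lower_dimensions(const int* sizes, int rank) {
      for (int i = 0; i < rank - 1; ++i) {
        if (sizes[i] < 0) {
          throw std::length_error("NegativeArraySizeException analogue");
        }
      }
    }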

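The "Either oop or narrowOop" comment refers to compressed object pointers: with UseCompressedOops the heap stores references as 32-bit compressed values (narrowOop), otherwise as full-width pointers (oop), so do_copy is templated on the element representation. A rough standalone illustration of that shape, with hypothetical stand-in types; unlike Copy::conjoint_oops_atomic, this simple loop does not attempt overlap handling or atomicity:

    #include <cstdint>

    typedef void*    wide_ref;    // stands in for a full-width reference (oop)
    typedef uint32_t narrow_ref;  // stands in for a compressed reference (narrowOop)

    // One template body serves both layouts; the caller instantiates it with
    // whichever representation the heap is using, e.g. copy_slots<narrow_ref>.
    template <class T>
    static void copy_slots(const T* src, T* dst, int length) {
      for (int i = 0; i < length; ++i) {
        dst[i] = src[i];
      }
    }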

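For context on do_copy() itself: when the source and destination arrays are the same, or the source element type is a subtype of the destination's element type, every element is already known to be storable, so a single write_ref_array_pre call covers the whole range and the copy is one bulk operation; otherwise (the branch truncated at the end of the hunk, presumably an element-wise checked copy) each element has to be type-checked before it is stored. A standalone sketch of that decision, using hypothetical stand-ins rather than the VM's Klass/BarrierSet machinery:

    #include <cstring>
    #include <stdexcept>
    #include <string>

    struct Elem { std::string type; };

    // Hypothetical subtype test standing in for Klass::is_subtype_of().
    static bool subtype_of(const std::string& stype, const std::string& bound) {
      return stype == bound || bound == "Object";
    }

    // Hypothetical pre-write barrier covering 'length' destination slots,
    // standing in for BarrierSet::write_ref_array_pre().
    static void pre_barrier(Elem** dst, int length) { (void)dst; (void)length; }

    static void copy_refs(Elem** src, const std::string& stype,
                          Elem** dst, const std::string& bound, int length) {
      if (src == dst || subtype_of(stype, bound)) {
        // Fast path: every source element fits the destination array, so one
        // barrier call covers the range and the copy is a single bulk move.
        pre_barrier(dst, length);
        std::memmove(dst, src, length * sizeof(Elem*));  // overlap-safe
      } else {
        // Slow path: check each element against the destination bound before
        // storing it, rejecting incompatible elements.
        for (int i = 0; i < length; ++i) {
          Elem* e = src[i];
          if (e != nullptr && !subtype_of(e->type, bound)) {
            throw std::runtime_error("ArrayStoreException analogue");
          }
          pre_barrier(dst + i, 1);
          dst[i] = e;
        }
      }
    }

The fast path exists so that the barrier work can go through the array-optimized entry points that the asserts at the top of do_copy require, rather than paying a per-element check and barrier.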