/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

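// Offset of the first non-static field. With the old layout this is the
// instance base offset aligned up to a long boundary.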
int ValueKlass::first_field_offset() const {
  if (UseNewLayout) {
    return get_first_field_offset();
  }
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() const {
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If bigger than 64 bits or containing oops, use the heapOop-aligned size,
  // which for values should be jlong aligned (raw_field_copy asserts otherwise)
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives...
  // For a few small basic type fields, return the actual power-of-two size, i.e.
  // 1 byte  = 1
  // 2 bytes = 2
  // 3 bytes = 4, because a power of two is needed for element stores
  int first_offset = first_field_offset();
  int last_offset  = 0; // find the last offset, add basic type size
  int last_tsz     = 0;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      continue;
    } else if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives: the layout aligns embedded values, so fall
        // back to the jlong-aligned size
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark()->is_always_locked(), "Unlocked value type");
  return oop;
}

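// A value is atomic if its entire payload fits in a single 64-bit
// load/store, so it can be copied without tearing fields.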
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

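// Total number of oop (reference) fields, summed over all non-static oop maps.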
int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

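// Should arrays of this value type be flattened? Only if flattening is
// enabled and the element is within the configured size and embedded
// oop-count limits.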
bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, int n, TRAPS) {
  if (storage_props.is_null_free()) {
    return value_array_klass(storage_props, or_null, n, THREAD);
  } else {
    return InstanceKlass::array_klass_impl(storage_props, or_null, n, THREAD);
  }
}

Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, TRAPS) {
  return array_klass_impl(storage_props, or_null, 1, THREAD);
}

Klass* ValueKlass::value_array_klass(ArrayStorageProperties storage_props, bool or_null, int rank, TRAPS) {
  Klass* vak = acquire_value_array_klass();
  if (vak == NULL) {
    if (or_null) return NULL;
    ResourceMark rm;
    {
      // Atomic creation of array_klasses
      MutexLocker ma(MultiArray_lock, THREAD);
      if (get_value_array_klass() == NULL) {
        vak = allocate_value_array_klass(CHECK_NULL);
        OrderAccess::release_store((Klass**)adr_value_array_klass(), vak);
      } else {
        // Another thread won the race: use its klass
        vak = get_value_array_klass();
      }
    }
  }
  if (!vak->is_valueArray_klass()) {
    storage_props.clear_flattened();
  }
  if (or_null) {
    return vak->array_klass_or_null(storage_props, rank);
  }
  return vak->array_klass(storage_props, rank, THREAD);
}

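// Choose the klass for null-free arrays of this value type: a flattened
// ValueArrayKlass when flatten_array() and the atomicity constraints allow
// it, otherwise a null-free ObjArrayKlass of references.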
Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
  if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
    return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
  }
  return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
}

void ValueKlass::array_klasses_do(void f(Klass* k)) {
  InstanceKlass::array_klasses_do(f);
  if (get_value_array_klass() != NULL)
    ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
}

void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  if (!UseNewLayout) {
    /*
     * Try not to shear fields even if not an atomic store...
     *
     * The first three cases handle value array stores; otherwise this works
     * on the same basis as JVM_Clone: at this size the data is aligned,
     * primitive types are laid out largest to smallest, and it is not
     * possible for fields to straddle long copy boundaries.
     *
     * Under multi-threaded access without exclusion, a partial value store
     * may be observed, but never partial primitive or reference field values.
     */
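    // Illustrative examples: raw_byte_size == 2 is a single jshort store,
    // while raw_byte_size == 16 takes the default case and copies two
    // jlongs atomically.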
    switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
    }
  } else {
    int size = this->get_exact_size_in_bytes();
    int length;
    switch (this->get_alignment()) {
    case BytesPerLong:
      length = size >> LogBytesPerLong;
      if (length > 0) {
        Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
        size -= length << LogBytesPerLong;
        src = (jlong*)src + length;
        dst = (jlong*)dst + length;
      }
      // Fallthrough
    case BytesPerInt:
      length = size >> LogBytesPerInt;
      if (length > 0) {
        Copy::conjoint_jints_atomic((jint*)src, (jint*)dst, length);
        size -= length << LogBytesPerInt;
        src = (jint*)src + length;
        dst = (jint*)dst + length;
      }
      // Fallthrough
    case BytesPerShort:
      length = size >> LogBytesPerShort;
      if (length > 0) {
        Copy::conjoint_jshorts_atomic((jshort*)src, (jshort*)dst, length);
        size -= length << LogBytesPerShort;
        src = (jshort*)src + length;
        dst = (jshort*)dst + length;
      }
      // Fallthrough
    case 1:
      if (size > 0) Copy::conjoint_jbytes_atomic((jbyte*)src, (jbyte*)dst, size);
      break;
    default:
      fatal("Unsupported alignment");
    }
  }
}

/*
 * Store the value of this klass contained within src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so using raw pointers is "safe" for now.
 *
 * Going forward, look to use a machine-generated (stub gen or BC) version for
 * the most-used klass layouts.
 *
 */
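// Illustrative use (a sketch, not taken from a real call site): storing one
// element into a flattened value array, where both pointers address the
// value payload rather than an object header:
//
//   vk->value_store(src_payload, dst_elem_addr, vk->raw_value_byte_size(),
//                   true /* dst_heap */, false /* dst_uninitialized */);
//
// src_payload and dst_elem_addr are hypothetical names for the caller's
// payload addresses.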
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops()) {
    if (dst_heap) {
      // src/dst aren't oops; an offset is needed to adjust the oop map offsets
      const address dst_oop_addr = ((address) dst) - first_field_offset();

      ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

      // Pre-barriers...
      OopMapBlock* map = start_of_nonstatic_oop_maps();
      OopMapBlock* const end = map + nonstatic_oop_map_count();
      while (map != end) {
        // Shame we can't just use the existing oop iterator...src/dst aren't oops
        address doop_address = dst_oop_addr + map->offset();
        // TEMP HACK: barrier code needs to migrate to the Access API (with its
        // own versions of the value type ops)
        if (UseCompressedOops) {
          bs->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
        } else {
          bs->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
        }
        map++;
      }

      raw_field_copy(src, dst, raw_byte_size);

      // Post-barriers...
      map = start_of_nonstatic_oop_maps();
      while (map != end) {
        address doop_address = dst_oop_addr + map->offset();
        bs->write_ref_array((HeapWord*) doop_address, map->count());
        map++;
      }
    } else { // Buffered value case
      raw_field_copy(src, dst, raw_byte_size);
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}

// Value type arguments are not passed by reference. Instead, each field of
// the value type is passed as an argument. This helper function collects
// the fields of the value type (including embedded value types' fields) in
// a list. Included with each field's type is its offset in the value type:
// the i2c and c2i adapters need that to load or store fields. Finally, the
// list of fields is sorted in order of increasing offsets: the adapters and
// the compiled code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE
// and ends with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the value
// type. If there's an embedded value type in the list, it also starts
// with a T_VALUETYPE and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (value types passed as references: iterate over the list until a
// T_VALUETYPE, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the value
// type is an argument: drop all T_VALUETYPE/T_VOID from the list).
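//
// Illustrative example (hypothetical value type): for
//   value class Point { int x; int y; }
// collect_fields() produces
//   T_VALUETYPE, T_INT, T_INT, T_VOID
// with offsets increasing from first_field_offset(). An embedded flattened
// value contributes its own nested T_VALUETYPE ... T_VOID pair.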
int ValueKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off) const {
  int count = 0;
  SigEntry::add_entry(sig, T_VALUETYPE, base_off);
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (fs.is_flattened()) {
      // Resolve klass of flattened value type field and recursively collect fields
      Klass* vk = get_value_field_klass(fs.index());
      count += ValueKlass::cast(vk)->collect_fields(sig, offset);
    } else {
      BasicType bt = FieldType::basic_type(fs.signature());
      if (bt == T_VALUETYPE) {
        bt = T_OBJECT;
      }
      SigEntry::add_entry(sig, bt, offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper() * HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  SigEntry::add_entry(sig, T_VOID, offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_VALUETYPE && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void ValueKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
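  // For ValueTypeReturnedAsFields, the return signature handed to
  // java_return_convention() below is the field list prefixed with a
  // T_METADATA slot carrying the ValueKlass pointer (see
  // returned_value_klass()).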
  if (is_scalarizable() && (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs)) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Can this value type be scalarized?
bool ValueKlass::is_scalarizable() const {
  return ScalarizeValueTypes;
}

// Can this value type be returned as multiple values?
bool ValueKlass::can_be_returned_as_fields() const {
  return return_regs() != NULL;
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      // Copy the raw 64 bits with a single double store
      new_vt->double_field_put(off, *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a ValueKlass oop
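// The return convention distinguishes the two cases by tagging: a ValueKlass*
// is returned with its low bit set, while an untagged word is a regular oop.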
ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be a ValueKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    ValueKlass* vk = (ValueKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
#ifdef ASSERT
  // Return value is not tagged, must be a valid oop
  if (VerifyOops) {
    oopDesc::verify(oop((HeapWord*)ptr));
  }
#endif
  return NULL;
}

void ValueKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header()->is_always_locked(), "Prototype header is not always locked");
}

void ValueKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark()->is_always_locked(), "Header is not always locked");
}