/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/signature.hpp"
#include "utilities/copy.hpp"
int ValueKlass::first_field_offset() const {
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
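  // (e.g., on 64-bit, base_offset_in_bytes() is 12 with compressed class
  // pointers and 16 without, so the first field lands at offset 16 either way).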
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() const {
  assert(this != SystemDictionary::___Value_klass(),
      "This is not the value type klass you are looking for");
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If the payload is 64 bits or larger, or contains oops, use the heap-oop-aligned
  // size, which for values is also jlong aligned; raw_field_copy asserts otherwise.
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives only...
  // For a few small basic type fields, return the actual size rounded up to a
  // power of two (needed for element stores), i.e.
  // 1 byte  -> 1
  // 2 bytes -> 2
  // 3 bytes -> 4
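  // Example (hypothetical layout): three byte fields at payload offsets 0, 1
  // and 2 span 3 bytes and round up to 4; a single short field returns 2.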
  int first_offset = first_field_offset();
  int last_offset  = 0; // find the last offset, add basic type size
  int last_tsz     = 0;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives: the layout aligns embedded values, so use the
        // jlong-aligned size.
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz != 0, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  return (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);
}

instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
  assert(THREAD->is_Java_thread(), "Only Java threads can call this method");

  instanceOop value = NULL;
  if (is_bufferable()) {
    value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
    *in_heap = false;
  }
  if (value == NULL) {
    log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
    value = allocate_instance(CHECK_NULL);
    *in_heap = true;
  }
  return value;
}

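// A value is "atomic" if its entire payload fits in a single 64-bit word, so
// the whole value can be read or written with one atomic access.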
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  if (!flatten_array()) {
    return InstanceKlass::array_klass_impl(or_null, n, THREAD);
  }

  // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass"
  if (array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (array_klasses() == NULL) {
        Klass* ak;
        if (is_atomic() || (!ValueArrayAtomicAccess)) {
          ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL);
        } else {
          ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL);
        }
        set_array_klasses(ak);
      }
    }
  }
  // array_klasses() will always be set at this point
  ArrayKlass* ak = ArrayKlass::cast(array_klasses());
  if (or_null) {
    return ak->array_klass_or_null(n);
  }
  return ak->array_klass(n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  /*
   * Try not to shear fields even if not an atomic store...
   *
   * The first three cases handle value array stores; otherwise this works on
   * the same basis as JVM_Clone: at this size the data is aligned. The order
   * of primitive types is largest to smallest, so it is not possible for
   * fields to straddle long copy boundaries.
   *
   * If multithreaded without exclusive access, it is possible to observe a
   * partial value store, but not partial primitive or reference field values.
   */
  switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
  }
}

/*
 * Store the value of this klass contained within src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use a machine-generated (stub gen or bc) version for
 * the most used klass layouts.
 *
 */
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops() && dst_heap) {
    // src/dst aren't oops, need offset to adjust oop map offset
    const address dst_oop_addr = ((address) dst) - first_field_offset();

    // Pre-barriers...
    OopMapBlock* map = start_of_nonstatic_oop_maps();
    OopMapBlock* const end = map + nonstatic_oop_map_count();
    while (map != end) {
      // Shame we can't just use the existing oop iterator...src/dst aren't oops
      address doop_address = dst_oop_addr + map->offset();
      if (UseCompressedOops) {
        oopDesc::bs()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
      } else {
        oopDesc::bs()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
      }
      map++;
    }

    raw_field_copy(src, dst, raw_byte_size);

    // Post-barriers...
    map = start_of_nonstatic_oop_maps();
    while (map != end) {
      address doop_address = dst_oop_addr + map->offset();
      oopDesc::bs()->write_ref_array((HeapWord*) doop_address, map->count());
      map++;
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}

oop ValueKlass::box(Handle src, InstanceKlass* target_klass, TRAPS) {
  assert(src()->klass()->is_value(), "src must be a value type");
  assert(!target_klass->is_value(), "target_klass must not be a value type");

  target_klass->initialize(CHECK_0);
  instanceOop box = target_klass->allocate_instance(CHECK_0);
  value_store(data_for_oop(src()), data_for_oop(box), true, false);

  assert(!box->klass()->is_value(), "Sanity check");
  return box;
}

oop ValueKlass::unbox(Handle src, InstanceKlass* target_klass, TRAPS) {
  assert(!src()->klass()->is_value(), "src must not be a value type");
  assert(target_klass->is_value(), "target_klass must be a value type");
  ValueKlass* vtklass = ValueKlass::cast(target_klass);

  vtklass->initialize(CHECK_0);
  bool in_heap;
  instanceOop value = vtklass->allocate_buffered_or_heap_instance(&in_heap, CHECK_0);
  value_store(data_for_oop(src()), data_for_oop(value), in_heap, false);

  assert(value->klass()->is_value(), "Sanity check");
  return value;
}

// Value type arguments are not passed by reference; instead, each
// field of the value type is passed as an argument. This helper
// function collects the fields of the value types (including embedded
// value types' fields) in a list. Included with the field's type is
// the offset of each field in the value type: i2c and c2i adapters
// need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the
// compiled code need an agreed-upon order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE
// and ends with an extra T_VOID. T_VALUETYPE/T_VOID are used as
// delimiters. Every entry between the two is a field of the value
// type. If there's an embedded value type in the list, it also starts
// with a T_VALUETYPE and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters, and we can
// generate the list of basic types from the interpreter point of view
// (value types passed as reference: iterate on the list until a
// T_VALUETYPE, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the value
// types is an argument: drop all T_VALUETYPE/T_VOID from the list).
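//
// Example (hypothetical value type): for a value type Point with two int
// fields x and y, the returned list is
//   [T_VALUETYPE, T_INT(x), T_INT(y), T_VOID]
// A long or double field contributes an extra T_VOID for its upper half, and
// a flattened value type field contributes its own nested
// T_VALUETYPE ... T_VOID run at the field's offset.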
GrowableArray<SigEntry> ValueKlass::collect_fields(int base_off) const {
  GrowableArray<SigEntry> sig_extended;
  sig_extended.push(SigEntry(T_VALUETYPE, base_off));
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static())  continue;
    fieldDescriptor& fd = fs.field_descriptor();
    BasicType bt = fd.field_type();
    int offset = base_off + fd.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (bt == T_VALUETYPE) {
      if (fd.is_flatten()) {
        Symbol* signature = fd.signature();
        JavaThread* THREAD = JavaThread::current();
        oop loader = class_loader();
        oop domain = protection_domain();
        ResetNoHandleMark rnhm;
        HandleMark hm;
        NoSafepointVerifier nsv;
        Klass* klass = SystemDictionary::resolve_or_null(signature,
                                                         Handle(THREAD, loader), Handle(THREAD, domain),
                                                         THREAD);
        assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail");
        const GrowableArray<SigEntry>& embedded = ValueKlass::cast(klass)->collect_fields(offset);
        sig_extended.appendAll(&embedded);
      } else {
        sig_extended.push(SigEntry(T_OBJECT, offset));
      }
    } else {
      sig_extended.push(SigEntry(bt, offset));
      if (bt == T_LONG || bt == T_DOUBLE) {
        sig_extended.push(SigEntry(T_VOID, offset));
      }
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  sig_extended.push(SigEntry(T_VOID, offset)); // hack: use T_VOID to mark end of value type fields
  if (base_off == 0) {
    sig_extended.sort(SigEntry::compare);
  }
  assert(sig_extended.at(0)._bt == T_VALUETYPE && sig_extended.at(sig_extended.length()-1)._bt == T_VOID, "broken structure");
  return sig_extended;
}

void ValueKlass::initialize_calling_convention() {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  guarantee(vtable_length() == 0, "vtables are not supported in value klasses");
  if (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs) {
    Thread* THREAD = Thread::current();
    assert(!HAS_PENDING_EXCEPTION, "should have no exception");
    ResourceMark rm;
    const GrowableArray<SigEntry>& sig_vk = collect_fields();
    int nb_fields = SigEntry::count_fields(sig_vk)+1;
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK_AND_CLEAR);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(sig_vk, sig_bt+1, nb_fields-1, true);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK_AND_CLEAR);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Create handles for all oop fields returned in registers that are
// going to be live across a safepoint.
bool ValueKlass::save_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  if (ValueTypeReturnedAsFields) {
    if (return_regs() != NULL) {
      save_oop_fields(reg_map, handles);
      return true;
    }
  }
  return false;
}

// Same as above but with pre-computed return convention
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
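  // regs->at(0) holds the returned klass (the T_METADATA slot of the return
  // convention), so field registers start at index 1. T_VALUETYPE markers and
  // the T_VOID end-of-value markers do not consume a register slot; a T_VOID
  // that follows a T_LONG or T_DOUBLE does (it is the pair's second half).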
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, bool buffered, TRAPS) {
  bool ignored = false;
  oop new_vt = NULL;
  if (buffered) {
    new_vt = allocate_buffered_or_heap_instance(&ignored, CHECK_NULL);
  } else {
    new_vt = allocate_instance(CHECK_NULL);
  }

  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch(bt) {
    case T_BOOLEAN: {
      jboolean v = *(intptr_t*)loc;
      *(jboolean*)((address)new_vt + off) = v;
      break;
    }
    case T_CHAR: {
      jchar v = *(intptr_t*)loc;
      *(jchar*)((address)new_vt + off) = v;
      break;
    }
    case T_BYTE: {
      jbyte v = *(intptr_t*)loc;
      *(jbyte*)((address)new_vt + off) = v;
      break;
    }
    case T_SHORT: {
      jshort v = *(intptr_t*)loc;
      *(jshort*)((address)new_vt + off) = v;
      break;
    }
    case T_INT: {
      jint v = *(intptr_t*)loc;
      *(jint*)((address)new_vt + off) = v;
      break;
    }
    case T_LONG: {
#ifdef _LP64
      jlong v = *(intptr_t*)loc;
      *(jlong*)((address)new_vt + off) = v;
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      oop v = handle();
      if (!UseCompressedOops) {
        oop* p = (oop*)((address)new_vt + off);
        oopDesc::store_heap_oop(p, v);
      } else {
        narrowOop* p = (narrowOop*)((address)new_vt + off);
        oopDesc::encode_store_heap_oop(p, v);
      }
      break;
    }
    case T_FLOAT: {
      jfloat v = *(jfloat*)loc;
      *(jfloat*)((address)new_vt + off) = v;
      break;
    }
    case T_DOUBLE: {
      jdouble v = *(jdouble*)loc;
      *(jdouble*)((address)new_vt + off) = v;
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

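// Decode the type of a value returned in registers: the return convention
// tags a returned ValueKlass* by setting bit 0 of the first return register.
// If the tag is present, clear it and return the klass; otherwise an ordinary
// oop (or NULL) was returned and this returns NULL.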
ValueKlass* ValueKlass::returned_value_type(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    return (ValueKlass*)ptr;
  }
  return NULL;
}