/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/signature.hpp"
#include "utilities/copy.hpp"

int ValueKlass::first_field_offset() const {
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() const {
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If the layout is bigger than 64 bits or needs oop alignment, use the
  // jlong-aligned size; raw_field_copy asserts on unaligned sizes otherwise.
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives...
  // If there are only a few small basic type fields, return the actual size, i.e.
  // 1 byte  = 1
  // 2 bytes = 2
  // 3 bytes = 4, because a power of two is needed for element stores
  int first_offset = first_field_offset();
  int last_offset  = 0; // find the last offset, add basic type size
  int last_tsz     = 0;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      continue;
    } else if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives: the layout aligns embedded values, so use the
        // jlong-aligned size.
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}
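
// Illustrative example (hypothetical layout, not from a real class): a value
// type with fields { jbyte a; jshort b; } at relative offsets 0 and 2 gives
// last_offset - first_offset == 4, so raw_value_byte_size() returns 4 and the
// payload can be copied with a single jint store (see raw_field_copy below).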

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  return (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);
}

instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
  assert(THREAD->is_Java_thread(), "Only Java threads can call this method");

  instanceOop value = NULL;
  if (is_bufferable()) {
    value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
    *in_heap = false;
  }
  if (value == NULL) {
    log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
    value = allocate_instance(CHECK_NULL);
    *in_heap = true;
  }
  return value;
}

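// A value type is "atomic" when its entire field payload fits in 64 bits and
// can therefore be read or written with a single long-sized access (see
// raw_field_copy). Non-atomic value types may get arrays of references
// instead of flat arrays when ValueArrayAtomicAccess is set (see
// array_klass_impl below).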
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}
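
// Illustrative usage (flag names taken from the checks above): running with
//   -XX:+ValueArrayFlatten -XX:ValueArrayElemMaxFlatSize=64
// flattens arrays of this value type only while raw_value_byte_size() is at
// most 64 bytes; larger layouts fall back to arrays of references. A negative
// limit disables the corresponding check.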


Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  if (!flatten_array()) {
    return InstanceKlass::array_klass_impl(or_null, n, THREAD);
  }

  // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass"
  if (array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (array_klasses() == NULL) {
        Klass* ak;
        if (is_atomic() || (!ValueArrayAtomicAccess)) {
          ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL);
        } else {
          ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL);
        }
        set_array_klasses(ak);
      }
    }
  }
  // array_klasses() will always be set at this point
  ArrayKlass* ak = ArrayKlass::cast(array_klasses());
  if (or_null) {
    return ak->array_klass_or_null(n);
  }
  return ak->array_klass(n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  /*
   * Try not to shear fields even if not an atomic store...
   *
   * The first three cases handle value array stores; otherwise this works on
   * the same basis as JVM_Clone: at this size the data is aligned, primitive
   * types are laid out largest to smallest, and it is not possible for fields
   * to straddle long copy boundaries.
   *
   * If multi-threaded without exclusive access, a partial value store may be
   * observed, but never partial primitive or reference field values.
   */
  switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
  }
}
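
// For example (illustrative): a 2-byte payload is copied with a single jshort
// store, so a racing reader can never observe a torn short; a 16-byte payload
// is copied as two atomic jlong stores, so individual fields are never torn
// even though the value as a whole may be seen half-updated.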

/*
 * Store the value of this klass contained within src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values)
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use machine-generated (stub gen or bc) versions for
 * the most used klass layouts.
 *
 */
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops()) {
    if (dst_heap) {
      // src/dst aren't oops, need offset to adjust oop map offset
      const address dst_oop_addr = ((address) dst) - first_field_offset();

      // Pre-barriers...
      OopMapBlock* map = start_of_nonstatic_oop_maps();
      OopMapBlock* const end = map + nonstatic_oop_map_count();
      while (map != end) {
        // Shame we can't just use the existing oop iterator...src/dst aren't oops
        address doop_address = dst_oop_addr + map->offset();
        // TEMP HACK: barrier code needs to migrate to the access API (need own versions of value type ops)
        if (UseCompressedOops) {
          BarrierSet::barrier_set()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
        } else {
          BarrierSet::barrier_set()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
        }
        map++;
      }

      raw_field_copy(src, dst, raw_byte_size);

      // Post-barriers...
      map = start_of_nonstatic_oop_maps();
      while (map != end) {
        address doop_address = dst_oop_addr + map->offset();
        BarrierSet::barrier_set()->write_ref_array((HeapWord*) doop_address, map->count());
        map++;
      }
    } else { // Buffered value case
      raw_field_copy(src, dst, raw_byte_size);
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}

// Value type arguments are not passed by reference; instead each
// field of the value type is passed as an argument. This helper
// function collects the fields of the value type (including embedded
// value types' fields) in a list. Included with each field's type is
// its offset in the value type: i2c and c2i adapters
// need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the
// compiled code need an agreed-upon order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE
// and ends with an extra T_VOID. T_VALUETYPE/T_VOID are used as
// delimiters. Every entry between the two is a field of the value
// type. If there's an embedded value type in the list, it also starts
// with a T_VALUETYPE and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters, and we can
// generate the list of basic types from the interpreter point of view
// (value types passed as reference: iterate on the list until a
// T_VALUETYPE, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the value
// type is an argument: drop all T_VALUETYPE/T_VOID from the list).
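//
// Illustrative example (hypothetical class, layout details simplified): for a
// value type V with fields { jint i; jlong l; }, collect_fields() would return
// something like
//   [T_VALUETYPE, T_INT, T_LONG, T_VOID, T_VOID]
// where the opening T_VALUETYPE and the final T_VOID are the delimiters, and
// the T_VOID right after T_LONG is the usual second slot pushed for longs and
// doubles.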
GrowableArray<SigEntry> ValueKlass::collect_fields(int base_off) const {
  GrowableArray<SigEntry> sig_extended;
  sig_extended.push(SigEntry(T_VALUETYPE, base_off));
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    fieldDescriptor& fd = fs.field_descriptor();
    BasicType bt = fd.field_type();
    int offset = base_off + fd.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (bt == T_VALUETYPE) {
      if (fd.is_flattened()) {
        Symbol* signature = fd.signature();
        JavaThread* THREAD = JavaThread::current();
        oop loader = class_loader();
        oop domain = protection_domain();
        ResetNoHandleMark rnhm;
        HandleMark hm;
        NoSafepointVerifier nsv;
        Klass* klass = SystemDictionary::resolve_or_null(signature,
                                                         Handle(THREAD, loader), Handle(THREAD, domain),
                                                         THREAD);
        assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail");
        const GrowableArray<SigEntry>& embedded = ValueKlass::cast(klass)->collect_fields(offset);
        sig_extended.appendAll(&embedded);
      } else {
        sig_extended.push(SigEntry(T_VALUETYPEPTR, offset));
      }
    } else {
      sig_extended.push(SigEntry(bt, offset));
      if (bt == T_LONG || bt == T_DOUBLE) {
        sig_extended.push(SigEntry(T_VOID, offset));
      }
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  sig_extended.push(SigEntry(T_VOID, offset)); // hack: use T_VOID to mark end of value type fields
  if (base_off == 0) {
    sig_extended.sort(SigEntry::compare);
  }
  assert(sig_extended.at(0)._bt == T_VALUETYPE && sig_extended.at(sig_extended.length()-1)._bt == T_VOID, "broken structure");
  return sig_extended;
}

void ValueKlass::initialize_calling_convention() {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  //guarantee(vtable_length() == 0, "vtables are not supported in value klasses");
  if (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs) {
    Thread* THREAD = Thread::current();
    assert(!HAS_PENDING_EXCEPTION, "should have no exception");
    ResourceMark rm;
    const GrowableArray<SigEntry>& sig_vk = collect_fields();
    int nb_fields = SigEntry::count_fields(sig_vk)+1;
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK_AND_CLEAR);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(sig_vk, sig_bt+1, nb_fields-1, true);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK_AND_CLEAR);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Can this value type be returned as multiple values?
bool ValueKlass::can_be_returned_as_fields() const {
  return return_regs() != NULL;
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
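  // regs->at(0) holds the register for the klass itself (the T_METADATA entry
  // set up in initialize_calling_convention), so field registers start at 1.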
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    // Must mirror the types for which save_oop_fields() pushed a handle
    if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, bool buffered, TRAPS) {
  bool ignored = false;
  oop new_vt = NULL;
  if (buffered) {
    new_vt = allocate_buffered_or_heap_instance(&ignored, CHECK_NULL);
  } else {
    new_vt = allocate_instance(CHECK_NULL);
  }

  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch(bt) {
    case T_BOOLEAN: {
      jboolean v = *(intptr_t*)loc;
      *(jboolean*)((address)new_vt + off) = v;
      break;
    }
    case T_CHAR: {
      jchar v = *(intptr_t*)loc;
      *(jchar*)((address)new_vt + off) = v;
      break;
    }
    case T_BYTE: {
      jbyte v = *(intptr_t*)loc;
      *(jbyte*)((address)new_vt + off) = v;
      break;
    }
    case T_SHORT: {
      jshort v = *(intptr_t*)loc;
      *(jshort*)((address)new_vt + off) = v;
      break;
    }
    case T_INT: {
      jint v = *(intptr_t*)loc;
      *(jint*)((address)new_vt + off) = v;
      break;
    }
    case T_LONG: {
#ifdef _LP64
      jlong v = *(intptr_t*)loc;
      *(jlong*)((address)new_vt + off) = v;
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_VALUETYPEPTR:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      oop v = handle();
      if (!UseCompressedOops) {
        oop* p = (oop*)((address)new_vt + off);
        oopDesc::store_heap_oop(p, v);
      } else {
        narrowOop* p = (narrowOop*)((address)new_vt + off);
        oopDesc::encode_store_heap_oop(p, v);
      }
      break;
    }
    case T_FLOAT: {
      jfloat v = *(jfloat*)loc;
      *(jfloat*)((address)new_vt + off) = v;
      break;
    }
    case T_DOUBLE: {
      jdouble v = *(jdouble*)loc;
      *(jdouble*)((address)new_vt + off) = v;
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a ValueKlass oop
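// Compiled code returning a value type as fields tags the klass pointer in
// the return register by setting its low bit; an untagged value in that
// register is a regular heap oop (checked below under ASSERT).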
ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Oop is tagged, must be a ValueKlass oop
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    ValueKlass* vk = (ValueKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
#ifdef ASSERT
  // Oop is not tagged, must be a valid oop
  if (VerifyOops) {
    oop((HeapWord*)ptr)->verify();
  }
#endif
  return NULL;
}

void ValueKlass::iterate_over_inside_oops(OopClosure* f, oop value) {
  assert(!Universe::heap()->is_in_reserved(value), "This method is used on buffered values");

  oop* addr_mirror = (oop*)(value)->mark_addr();
  f->do_oop_no_buffering(addr_mirror);

  if (!contains_oops()) return;

  OopMapBlock* map = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  if (!UseCompressedOops) {
    for (; map < end_map; map++) {
      oop* p = (oop*) (((char*)(oopDesc*)value) + map->offset());
      oop* const end = p + map->count();
      for (; p < end; ++p) {
        assert(oopDesc::is_oop_or_null(*p), "Sanity check");
        f->do_oop(p);
      }
    }
  } else {
    for (; map < end_map; map++) {
      narrowOop* p = (narrowOop*) (((char*)(oopDesc*)value) + map->offset());
      narrowOop* const end = p + map->count();
      for (; p < end; ++p) {
        oop o = oopDesc::decode_heap_oop(*p);
        assert(Universe::heap()->is_in_reserved_or_null(o), "Sanity check");
        assert(oopDesc::is_oop_or_null(o), "Sanity check");
        f->do_oop(p);
      }
    }
  }
}