/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "utilities/copy.hpp"

int ValueKlass::first_field_offset() const {
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
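  // (e.g. on a 64-bit VM with compressed class pointers, an assumed layout,
  // the object header is 12 bytes, so the first field lands at
  // align_up(12, 8) == 16)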
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() const {
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If bigger than 64 bits or contains oops, use the heap-oop-aligned size,
  // which for values should be jlong aligned (raw_field_copy asserts this)
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives only...
  // If there are just a few small basic type fields, return the actual size
  // rounded up to a power of two, i.e.
  // 1 byte  = 1
  // 2 bytes = 2
  // 3 bytes = 4, because a pow2 size is needed for array element stores
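  // e.g. a hypothetical layout { byte b; short s } at offsets 16 and 18 gives
  // last_offset == 18 + 2 == 20, so the result is 1 << upper_log2(20 - 16) == 4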
  int first_offset = first_field_offset();
  int last_offset  = 0; // find the last offset, add basic type size
  int last_tsz     = 0;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      continue;
    } else if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives: the layout aligns embedded values, so use the
        // jlong-aligned size
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark()->is_always_locked(), "Unlocked value type");
  return oop;
}

instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
  assert(THREAD->is_Java_thread(), "Only Java threads can call this method");

  instanceOop value = NULL;
  if (is_bufferable()) {
    value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
    *in_heap = false;
  }
  if (value == NULL) {
    log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
    value = allocate_instance(CHECK_NULL);
    *in_heap = true;
  }
  return value;
}

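// A value is atomic if all of its field storage fits in a single jlong, so a
// whole instance can be copied with one 64-bit store (e.g. with
// heapOopSize == 4, any value with nonstatic_field_size() <= 2 qualifies).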
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of value types...

bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

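  // A negative flag value means "no limit": e.g. -XX:ValueArrayElemMaxFlatSize=64
  // caps flattened elements at 64 bytes, while -XX:ValueArrayElemMaxFlatSize=-1
  // never rejects on size.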
  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  if (!flatten_array()) {
    return InstanceKlass::array_klass_impl(or_null, n, THREAD);
  }

  // Basically the same as InstanceKlass::array_klass_impl, but using
  // ValueArrayKlass::allocate_klass
  if (array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (array_klasses() == NULL) {
        Klass* ak;
        if (is_atomic() || (!ValueArrayAtomicAccess)) {
          ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL);
        } else {
          ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL);
        }
        set_array_klasses(ak);
      }
    }
  }
  // array_klasses() will always be set at this point
  ArrayKlass* ak = ArrayKlass::cast(array_klasses());
  if (or_null) {
    return ak->array_klass_or_null(n);
  }
  return ak->array_klass(n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  /*
   * Try not to shear fields even if not an atomic store...
   *
   * The first 3 cases handle value array stores; otherwise this works on the
   * same basis as JVM_Clone, since at this size the data is aligned. Primitive
   * types are ordered largest to smallest, so it is not possible for fields to
   * straddle long copy boundaries.
   *
   * If MT without exclusive access, it is possible to observe a partial value
   * store, but not partial primitive and reference field values.
   */
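  // e.g. raw_byte_size == 16 is copied as two atomic jlong stores: a concurrent
  // reader may observe one half updated before the other (a partial value),
  // but never a torn primitive or reference field.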
  switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
  }
}

/*
 * Store the value of this klass contained within src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate C-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use machine-generated (stub gen or bc) versions for
 * the most used klass layouts.
 */
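// A typical caller shape (an illustrative sketch; data_for_oop() and
// value_at_addr() are assumed helper names, not confirmed here): a flattened
// array store would amount to
//   vk->value_store(vk->data_for_oop(src_value),
//                   varray->value_at_addr(index, vk),
//                   vk->raw_value_byte_size(),
//                   true /* dst_heap */, false /* dst_uninitialized */);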
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops()) {
    if (dst_heap) {
      // src/dst aren't oops, need offset to adjust oop map offset
      const address dst_oop_addr = ((address) dst) - first_field_offset();

      ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

      // Pre-barriers...
      OopMapBlock* map = start_of_nonstatic_oop_maps();
      OopMapBlock* const end = map + nonstatic_oop_map_count();
      while (map != end) {
        // Shame we can't just use the existing oop iterator...src/dst aren't oops
        address doop_address = dst_oop_addr + map->offset();
        // TEMP HACK: barrier code needs to migrate to the access API (with its
        // own versions of the value type ops)
        if (UseCompressedOops) {
          bs->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
        } else {
          bs->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
        }
        map++;
      }

      raw_field_copy(src, dst, raw_byte_size);

      // Post-barriers...
      map = start_of_nonstatic_oop_maps();
      while (map != end) {
        address doop_address = dst_oop_addr + map->offset();
        bs->write_ref_array((HeapWord*) doop_address, map->count());
        map++;
      }
    } else { // Buffered value case
      raw_field_copy(src, dst, raw_byte_size);
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}

// Value type arguments are not passed by reference, instead each
// field of the value type is passed as an argument. This helper
// function collects the fields of the value type (including embedded
// value types' fields) in a list. Included with the field's type is
// the offset of each field in the value type: i2c and c2i adapters
// need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the
// compiled code need an agreed-upon order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE
// and ends with an extra T_VOID. T_VALUETYPE/T_VOID are used as
// delimiters. Every entry between the two is a field of the value
// type. If there's an embedded value type in the list, it also starts
// with a T_VALUETYPE and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (value types passed as reference: iterate on the list until a
// T_VALUETYPE, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the value
// types is an argument: drop all T_VALUETYPE/T_VOID from the list).
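//
// Example (assuming the fields are laid out in this order): a value type with
// an int field x and a long field y yields
//   [ T_VALUETYPE(start), T_INT(x), T_LONG(y), T_VOID(y's upper half), T_VOID(end) ]
// and a flattened embedded value type contributes its own nested
// T_VALUETYPE ... T_VOID subsequence at its field offset.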
GrowableArray<SigEntry> ValueKlass::collect_fields(int base_off) const {
  GrowableArray<SigEntry> sig_extended;
  sig_extended.push(SigEntry(T_VALUETYPE, base_off));
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    fieldDescriptor& fd = fs.field_descriptor();
    BasicType bt = fd.field_type();
    int offset = base_off + fd.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (bt == T_VALUETYPE) {
      if (fd.is_flattened()) {
        Symbol* signature = fd.signature();
        JavaThread* THREAD = JavaThread::current();
        oop loader = class_loader();
        oop domain = protection_domain();
        ResetNoHandleMark rnhm;
        HandleMark hm;
        NoSafepointVerifier nsv;
        Klass* klass = SystemDictionary::resolve_or_null(signature,
                                                         Handle(THREAD, loader), Handle(THREAD, domain),
                                                         THREAD);
        assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail");
        const GrowableArray<SigEntry>& embedded = ValueKlass::cast(klass)->collect_fields(offset);
        sig_extended.appendAll(&embedded);
      } else {
        sig_extended.push(SigEntry(T_VALUETYPEPTR, offset));
      }
    } else {
      sig_extended.push(SigEntry(bt, offset));
      if (bt == T_LONG || bt == T_DOUBLE) {
        sig_extended.push(SigEntry(T_VOID, offset));
      }
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  sig_extended.push(SigEntry(T_VOID, offset)); // hack: use T_VOID to mark end of value type fields
  if (base_off == 0) {
    sig_extended.sort(SigEntry::compare);
  }
  assert(sig_extended.at(0)._bt == T_VALUETYPE && sig_extended.at(sig_extended.length()-1)._bt == T_VOID, "broken structure");
  return sig_extended;
}

void ValueKlass::initialize_calling_convention() {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  //guarantee(vtable_length() == 0, "vtables are not supported in value klasses");
  if (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs) {
    Thread* THREAD = Thread::current();
    assert(!HAS_PENDING_EXCEPTION, "should have no exception");
    ResourceMark rm;
    const GrowableArray<SigEntry>& sig_vk = collect_fields();
    int nb_fields = SigEntry::count_fields(sig_vk)+1;
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK_AND_CLEAR);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
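      // Slot 0 carries the ValueKlass itself; returned_value_klass() below
      // recovers it, tagged, from the first return register.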
      SigEntry::fill_sig_bt(sig_vk, sig_bt+1, nb_fields-1, true);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK_AND_CLEAR);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Can this value type be returned as multiple values?
bool ValueKlass::can_be_returned_as_fields() const {
  return return_regs() != NULL;
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;
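  // j starts at 1 because regs->at(0) is the T_METADATA slot holding the
  // returned klass (see initialize_calling_convention()), not a field.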

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}
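
// Typical pairing with restore_oop_results() below (a sketch using this
// file's own entry points):
//   vk->save_oop_fields(reg_map, handles);     // before code that may safepoint
//   /* ... GC may move the oops ... */
//   vk->restore_oop_results(reg_map, handles); // write back the (moved) oops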

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      int off = sig_vk->at(i)._offset;
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, bool buffered, TRAPS) {
  bool ignored = false;
  oop new_vt = NULL;
  if (buffered) {
    new_vt = allocate_buffered_or_heap_instance(&ignored, CHECK_NULL);
  } else {
    new_vt = allocate_instance(CHECK_NULL);
  }

  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch(bt) {
    case T_BOOLEAN: {
      jboolean v = *(intptr_t*)loc;
      *(jboolean*)((address)new_vt + off) = v;
      break;
    }
    case T_CHAR: {
      jchar v = *(intptr_t*)loc;
      *(jchar*)((address)new_vt + off) = v;
      break;
    }
    case T_BYTE: {
      jbyte v = *(intptr_t*)loc;
      *(jbyte*)((address)new_vt + off) = v;
      break;
    }
    case T_SHORT: {
      jshort v = *(intptr_t*)loc;
      *(jshort*)((address)new_vt + off) = v;
      break;
    }
    case T_INT: {
      jint v = *(intptr_t*)loc;
      *(jint*)((address)new_vt + off) = v;
      break;
    }
    case T_LONG: {
#ifdef _LP64
      jlong v = *(intptr_t*)loc;
      *(jlong*)((address)new_vt + off) = v;
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_VALUETYPEPTR:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      HeapAccess<>::oop_store_at(new_vt, off, handle());
      break;
    }
    case T_FLOAT: {
      jfloat v = *(jfloat*)loc;
      *(jfloat*)((address)new_vt + off) = v;
      break;
    }
    case T_DOUBLE: {
      jdouble v = *(jdouble*)loc;
      *(jdouble*)((address)new_vt + off) = v;
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD; // poison the consumed register location
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a tagged ValueKlass pointer: compiled code
// returning a value type as fields tags the klass in the return register,
// otherwise the register holds an ordinary oop
ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be a ValueKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    ValueKlass* vk = (ValueKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
#ifdef ASSERT
  // Oop is not tagged, must be a valid oop
  if (VerifyOops) {
    oopDesc::verify(oop((HeapWord*)ptr));
  }
#endif
  return NULL;
}

void ValueKlass::iterate_over_inside_oops(OopClosure* f, oop value) {
  assert(!Universe::heap()->is_in_reserved(value), "This method is used on buffered values");

  oop* addr_mirror = (oop*)(value)->mark_addr_raw();
  f->do_oop_no_buffering(addr_mirror);

  if (!contains_oops()) return;

  OopMapBlock* map = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  if (!UseCompressedOops) {
    for (; map < end_map; map++) {
      oop* p = (oop*) (((char*)(oopDesc*)value) + map->offset());
      oop* const end = p + map->count();
      for (; p < end; ++p) {
        assert(oopDesc::is_oop_or_null(*p), "Sanity check");
        f->do_oop(p);
      }
    }
  } else {
    for (; map < end_map; map++) {
      narrowOop* p = (narrowOop*) (((char*)(oopDesc*)value) + map->offset());
      narrowOop* const end = p + map->count();
      for (; p < end; ++p) {
        oop o = CompressedOops::decode(*p);
        assert(Universe::heap()->is_in_reserved_or_null(o), "Sanity check");
        assert(oopDesc::is_oop_or_null(o), "Sanity check");
        f->do_oop(p);
      }
    }
  }
}

void ValueKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header()->is_always_locked(), "Prototype header is not always locked");
}

void ValueKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark()->is_always_locked(), "Header is not always locked");
}