1 /*
   2  * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/gcLocker.inline.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "logging/log.hpp"

  29 #include "oops/oop.inline.hpp"
  30 #include "oops/fieldStreams.hpp"
  31 #include "oops/method.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "oops/valueKlass.hpp"
  34 #include "oops/valueArrayKlass.hpp"
  35 #include "runtime/signature.hpp"
  36 #include "utilities/copy.hpp"
  37 
// Returns the byte offset of the first (lowest-offset) field of a value
// instance: the oop header size rounded up to a long boundary.
int ValueKlass::first_field_offset() const {
#ifdef ASSERT
  // Debug-only cross-check: scan the declared Java fields and remember the
  // smallest offset actually assigned by the field layouter.
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset= fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_size_up(base_offset, BytesPerLong);
  // In product builds the assert (and first_offset) compiles away.
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}
  51 
// Returns the number of bytes that must be copied to transfer this value's
// field payload (excluding the object header). The result is either the
// heap-oop-aligned payload size, or — for small primitive-only values — the
// smallest power of two covering the fields (so array element stores can use
// a single sized store).
int ValueKlass::raw_value_byte_size() const {
  assert(this != SystemDictionary::___Value_klass(),
      "This is not the value type klass you are looking for");
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If bigger than 64 bits or needs oop alignment, then use jlong aligned
  // which for values should be jlong aligned, asserts in raw_field_copy otherwise
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives...
  // If a few small basic type fields, return the actual size, i.e.
  // 1 byte = 1
  // 2 byte = 2
  // 3 byte = 4, because pow2 needed for element stores
  int first_offset = first_field_offset();
  int last_offset  = 0; // find the last offset, add basic type size
  int last_tsz     = 0;
  // Walk all fields, tracking the highest-offset one and its type size.
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives. Layout aligns embedded value, so use jlong aligned it is
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes VT with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz, "Invariant");
  // Round the payload span up to the next power of two.
  return 1 << upper_log2(last_offset - first_offset);
}
  89 
  90 instanceOop ValueKlass::allocate_instance(TRAPS) {
  91   int size = size_helper();  // Query before forming handle.
  92 
  93   return (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);
  94 }
  95 
  96 instanceOop ValueKlass::allocate_buffered_or_heap_instance(bool* in_heap, TRAPS) {
  97   assert(THREAD->is_Java_thread(), "Only Java threads can call this method");
  98 
  99   instanceOop value = NULL;
 100   if (is_bufferable()) {
 101     value = (instanceOop)VTBuffer::allocate_value(this, CHECK_NULL);
 102     *in_heap = false;
 103   }
 104   if (value == NULL) {
 105     log_info(valuetypes)("Value buffering failed, allocating in the Java heap");
 106     value = allocate_instance(CHECK_NULL);
 107     *in_heap = true;
 108   }
 109   return value;
 110 }
 111 
 112 bool ValueKlass::is_atomic() {
 113   return (nonstatic_field_size() * heapOopSize) <= longSize;
 114 }
 115 
 116 int ValueKlass::nonstatic_oop_count() {
 117   int oops = 0;
 118   int map_count = nonstatic_oop_map_count();
 119   OopMapBlock* block = start_of_nonstatic_oop_maps();
 120   OopMapBlock* end = block + map_count;
 121   while (block != end) {
 122     oops += block->count();
 123     block++;
 124   }
 125   return oops;
 126 }
 127 
 128 // Arrays of...
 129 
 130 bool ValueKlass::flatten_array() {
 131   if (!ValueArrayFlatten) {
 132     return false;
 133   }
 134 
 135   int elem_bytes = raw_value_byte_size();
 136   // Too big
 137   if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
 138     return false;
 139   }
 140   // Too many embedded oops
 141   if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
 142     return false;
 143   }
 144 
 145   return true;
 146 }
 147 
 148 
 149 Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
 150   if (!flatten_array()) {
 151     return InstanceKlass::array_klass_impl(or_null, n, THREAD);
 152   }
 153 
 154   // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass"
 155   if (array_klasses() == NULL) {
 156     if (or_null) return NULL;
 157 
 158     ResourceMark rm;
 159     JavaThread *jt = (JavaThread *)THREAD;
 160     {
 161       // Atomic creation of array_klasses
 162       MutexLocker mc(Compile_lock, THREAD);   // for vtables
 163       MutexLocker ma(MultiArray_lock, THREAD);
 164 
 165       // Check if update has already taken place
 166       if (array_klasses() == NULL) {
 167         Klass* ak;
 168         if (is_atomic() || (!ValueArrayAtomicAccess)) {
 169           ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL);
 170         } else {
 171           ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL);
 172         }
 173         set_array_klasses(ak);
 174       }
 175     }
 176   }
 177   // _this will always be set at this point
 178   ArrayKlass* ak = ArrayKlass::cast(array_klasses());
 179   if (or_null) {
 180     return ak->array_klass_or_null(n);
 181   }
 182   return ak->array_klass(n, THREAD);
 183 }
 184 
// One-dimensional convenience overload of the lazy array-klass lookup above.
Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}
 188 
 189 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
 190   /*
 191    * Try not to shear fields even if not an atomic store...
 192    *
 193    * First 3 cases handle value array store, otherwise works on the same basis
 194    * as JVM_Clone, at this size data is aligned. The order of primitive types
 195    * is largest to smallest, and it not possible for fields to stradle long
 196    * copy boundaries.
 197    *
 198    * If MT without exclusive access, possible to observe partial value store,
 199    * but not partial primitive and reference field values
 200    */
 201   switch (raw_byte_size) {
 202     case 1:
 203       *((jbyte*) dst) = *(jbyte*)src;
 204       break;
 205     case 2:
 206       *((jshort*) dst) = *(jshort*)src;
 207       break;
 208     case 4:
 209       *((jint*) dst) = *(jint*) src;
 210       break;
 211     default:
 212       assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
 213       Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
 214   }
 215 }
 216 
 217 /*
 218  * Store the value of this klass contained with src into dst.
 219  *
 220  * This operation is appropriate for use from vastore, vaload and putfield (for values)
 221  *
 222  * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointer access is "safe" for now.
 224  *
 225  * Going forward, look to use machine generated (stub gen or bc) version for most used klass layouts
 226  *
 227  */
// Copies this value's field payload from src to dst (see block comment above).
// When dst is in the Java heap and the value embeds oops, GC pre-barriers are
// issued before the copy and post-barriers after it, per oop-map block.
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops() && dst_heap) {
    // src/dst aren't oops, need offset to adjust oop map offset
    const address dst_oop_addr = ((address) dst) - first_field_offset();

    // Pre-barriers...
    OopMapBlock* map = start_of_nonstatic_oop_maps();
    OopMapBlock* const end = map + nonstatic_oop_map_count();
    while (map != end) {
      // Shame we can't just use the existing oop iterator...src/dst aren't oop
      address doop_address = dst_oop_addr + map->offset();
      if (UseCompressedOops) {
        oopDesc::bs()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
      } else {
        oopDesc::bs()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
      }
      map++;
    }

    // The actual payload copy happens between the pre- and post-barriers.
    raw_field_copy(src, dst, raw_byte_size);

    // Post-barriers...
    map = start_of_nonstatic_oop_maps();
    while (map != end) {
      address doop_address = dst_oop_addr + map->offset();
      oopDesc::bs()->write_ref_array((HeapWord*) doop_address, map->count());
      map++;
    }
  } else {   // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}
 260 
// Boxes the value 'src': allocates a fresh heap instance of the (non-value)
// target_klass and copies src's field payload into it.
// Returns the new box; NULL with a pending exception on allocation failure.
oop ValueKlass::box(Handle src, InstanceKlass* target_klass, TRAPS) {
  assert(src()->klass()->is_value(), "src must be a value type");
  assert(!target_klass->is_value(), "target_klass must not be a value type");

  target_klass->initialize(CHECK_0);
  instanceOop box = target_klass->allocate_instance(CHECK_0);
  // NOTE(review): this is a 4-argument value_store call (dst_heap=true,
  // dst_uninitialized=false) while the definition above takes 5 arguments
  // including raw_byte_size — presumably an overload declared in the header
  // that derives the size from the klass; confirm it exists.
  value_store(data_for_oop(src()), data_for_oop(box), true, false);

  assert(!box->klass()->is_value(), "Sanity check");
  return box;
}
 272 
// Unboxes 'src' into a fresh instance of the value type target_klass
// (buffered if possible, heap-allocated otherwise) by copying src's payload.
// Returns the new value instance; NULL with a pending exception on failure.
oop ValueKlass::unbox(Handle src, InstanceKlass* target_klass, TRAPS) {
  assert(!src()->klass()->is_value(), "src must not be a value type");
  assert(target_klass->is_value(), "target_klass must be a value type");
  ValueKlass* vtklass = ValueKlass::cast(target_klass);

  vtklass->initialize(CHECK_0);
  bool in_heap;
  instanceOop value = vtklass->allocate_buffered_or_heap_instance(&in_heap, CHECK_0);
  // NOTE(review): 4-argument value_store call — assumes an overload that
  // derives raw_byte_size from the klass; confirm against the header.
  value_store(data_for_oop(src()), data_for_oop(value), in_heap, false);

  assert(value->klass()->is_value(), "Sanity check");
  return value;
}
 286 
 287 // Value type arguments are not passed by reference, instead each
 288 // field of the value type is passed as an argument. This helper
 289 // function collects the fields of the value types (including embedded
 290 // value type's fields) in a list. Included with the field's type is
 291 // the offset of each field in the value type: i2c and c2i adapters
 292 // need that to load or store fields. Finally, the list of fields is
 293 // sorted in order of increasing offsets: the adapters and the
// compiled code need an agreed-upon order of fields.
 295 //
 296 // The list of basic types that is returned starts with a T_VALUETYPE
 297 // and ends with an extra T_VOID. T_VALUETYPE/T_VOID are used as
 298 // delimiters. Every entry between the two is a field of the value
 299 // type. If there's an embedded value type in the list, it also starts
 300 // with a T_VALUETYPE and ends with a T_VOID. This is so we can
 301 // generate a unique fingerprint for the method's adapters and we can
 302 // generate the list of basic types from the interpreter point of view
 303 // (value types passed as reference: iterate on the list until a
 304 // T_VALUETYPE, drop everything until and including the closing
 305 // T_VOID) or the compiler point of view (each field of the value
 306 // types is an argument: drop all T_VALUETYPE/T_VOID from the list).
// See the block comment above for the list's structure and delimiters.
// base_off == 0 for the outermost value; recursive calls pass the embedded
// field's offset so nested field offsets come out instance-relative.
GrowableArray<SigEntry> ValueKlass::collect_fields(int base_off) const {
  GrowableArray<SigEntry> sig_extended;
  // Opening delimiter for this value type's fields.
  sig_extended.push(SigEntry(T_VALUETYPE, base_off));
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static())  continue;
    fieldDescriptor& fd = fs.field_descriptor();
    BasicType bt = fd.field_type();
    // Rebase: for an embedded value (base_off > 0) the header portion
    // (first_field_offset) is subtracted out of the raw field offset.
    int offset = base_off + fd.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (bt == T_VALUETYPE) {
      // Embedded value type: resolve its klass and inline its fields.
      Symbol* signature = fd.signature();
      JavaThread* THREAD = JavaThread::current();
      oop loader = class_loader();
      oop domain = protection_domain();
      ResetNoHandleMark rnhm;
      HandleMark hm;
      // NOTE(review): NoSafepointVerifier implies the lookup is expected to
      // hit an already-loaded klass and never safepoint — confirm.
      NoSafepointVerifier nsv;
      Klass* klass = SystemDictionary::resolve_or_null(signature,
                                                       Handle(THREAD, loader), Handle(THREAD, domain),
                                                       THREAD);
      assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail");
      const GrowableArray<SigEntry>& embedded = ValueKlass::cast(klass)->collect_fields(offset);
      sig_extended.appendAll(&embedded);
    } else {
      sig_extended.push(SigEntry(bt, offset));
      // Longs and doubles occupy two slots; mark the second with T_VOID.
      if (bt == T_LONG || bt == T_DOUBLE) {
        sig_extended.push(SigEntry(T_VOID, offset));
      }
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  sig_extended.push(SigEntry(T_VOID, offset)); // hack: use T_VOID to mark end of value type fields
  if (base_off == 0) {
    // Only the outermost call sorts, so the final list is ordered by
    // increasing offset (adapters and compiled code rely on this order).
    sig_extended.sort(SigEntry::compare);
  }
  assert(sig_extended.at(0)._bt == T_VALUETYPE && sig_extended.at(sig_extended.length()-1)._bt == T_VOID, "broken structure");
  return sig_extended;
}
 344 
// Returns the basic types and registers for fields to return an
// instance of this value type in registers if possible.
// On success regs/nb_fields describe the register assignment (slot 0 is a
// T_METADATA entry carrying the klass); on failure regs is set to NULL and
// the caller must use the conventional in-memory return.
GrowableArray<SigEntry> ValueKlass::return_convention(VMRegPair*& regs, int& nb_fields) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const GrowableArray<SigEntry>& sig_vk = collect_fields();
  // +1 for the leading T_METADATA slot holding the ValueKlass itself.
  nb_fields = SigEntry::count_fields(sig_vk)+1;
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
  sig_bt[0] = T_METADATA;
  SigEntry::fill_sig_bt(sig_vk, sig_bt+1, nb_fields-1, true);
  regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
  int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

  if (total <= 0) {
    // Not all fields fit in return registers: signal "no field return".
    regs = NULL;
  }

  return sig_vk;
}
 363 
 364 // Create handles for all oop fields returned in registers that are
 365 // going to be live across a safepoint.
 366 bool ValueKlass::save_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
 367   if (ValueTypeReturnedAsFields) {
 368     int nb_fields;
 369     VMRegPair* regs;
 370     const GrowableArray<SigEntry>& sig_vk = return_convention(regs, nb_fields);
 371     
 372     if (regs != NULL) {
 373       regs++;
 374       nb_fields--;
 375       save_oop_fields(sig_vk, reg_map, regs, handles, nb_fields);
 376       return true;
 377     }
 378   }
 379   return false;
 380 }
 381 
 382 // Same as above but with pre-computed return convention
 383 void ValueKlass::save_oop_fields(const GrowableArray<SigEntry>& sig_vk, RegisterMap& reg_map, const VMRegPair* regs, GrowableArray<Handle>& handles, int nb_fields) const {
 384   int j = 0;
 385   Thread* thread = Thread::current();
 386   for (int i = 0; i < sig_vk.length(); i++) {
 387     BasicType bt = sig_vk.at(i)._bt;




 388     if (bt == T_OBJECT || bt == T_ARRAY) {
 389       int off = sig_vk.at(i)._offset;
 390       VMRegPair pair = regs[j];
 391       address loc = reg_map.location(pair.first());
 392       oop v = *(oop*)loc;
 393       assert(v == NULL || v->is_oop(), "not an oop?");
 394       assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
 395       handles.push(Handle(thread, v));
 396     }
 397     if (bt == T_VALUETYPE) {
 398       continue;
 399     }
 400     if (bt == T_VOID &&
 401         sig_vk.at(i-1)._bt != T_LONG &&
 402         sig_vk.at(i-1)._bt != T_DOUBLE) {
 403       continue;
 404     }
 405     j++;
 406   }
 407   assert(j == nb_fields, "missed a field?");
 408 }
 409 
 410 // Update oop fields in registers from handles after a safepoint
 411 void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
 412   assert(ValueTypeReturnedAsFields, "inconsistent");
 413   int nb_fields;
 414   VMRegPair* regs;
 415   const GrowableArray<SigEntry>& sig_vk = return_convention(regs, nb_fields);
 416   assert(regs != NULL, "inconsistent");
 417 
 418   regs++;
 419   nb_fields--;
 420 
 421   int j = 0;
 422   for (int i = 0, k = 0; i < sig_vk.length(); i++) {
 423     BasicType bt = sig_vk.at(i)._bt;
 424     if (bt == T_OBJECT || bt == T_ARRAY) {
 425       int off = sig_vk.at(i)._offset;
 426       VMRegPair pair = regs[j];
 427       address loc = reg_map.location(pair.first());
 428       *(oop*)loc = handles.at(k++)();
 429     }
 430     if (bt == T_VALUETYPE) {
 431       continue;
 432     }
 433     if (bt == T_VOID &&
 434         sig_vk.at(i-1)._bt != T_LONG &&
 435         sig_vk.at(i-1)._bt != T_DOUBLE) {
 436       continue;
 437     }
 438     j++;
 439   }
 440   assert(j == nb_fields, "missed a field?");
 441 }
 442 
// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
// 'j' indexes register slots (skipping T_VALUETYPE/T_VOID delimiters, as in
// save/restore above); 'k' indexes the handleized oops saved earlier.
oop ValueKlass::realloc_result(const GrowableArray<SigEntry>& sig_vk, const RegisterMap& reg_map, const VMRegPair* regs,
                               const GrowableArray<Handle>& handles, int nb_fields, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);

  int j = 0;
  int k = 0;
  for (int i = 0; i < sig_vk.length(); i++) {
    BasicType bt = sig_vk.at(i)._bt;
    // T_VALUETYPE entries are delimiters, not fields: no register slot.
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      // A T_VOID after a long/double is its second slot: consume it.
      // Otherwise it is a closing delimiter: skip with no slot.
      if (sig_vk.at(i-1)._bt == T_LONG ||
          sig_vk.at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk.at(i)._offset;
    VMRegPair pair = regs[j];
    address loc = reg_map.location(pair.first());
    // Sub-word values were saved as full words; narrow to the field width.
    switch(bt) {
    case T_BOOLEAN: {
      jboolean v = *(intptr_t*)loc;
      *(jboolean*)((address)new_vt + off) = v;
      break;
    }
    case T_CHAR: {
      jchar v = *(intptr_t*)loc;
      *(jchar*)((address)new_vt + off) = v;
      break;
    }
    case T_BYTE: {
      jbyte v = *(intptr_t*)loc;
      *(jbyte*)((address)new_vt + off) = v;
      break;
    }
    case T_SHORT: {
      jshort v = *(intptr_t*)loc;
      *(jshort*)((address)new_vt + off) = v;
      break;
    }
    case T_INT: {
      jint v = *(intptr_t*)loc;
      *(jint*)((address)new_vt + off) = v;
      break;
    }
    case T_LONG: {
#ifdef _LP64
      jlong v = *(intptr_t*)loc;
      *(jlong*)((address)new_vt + off) = v;
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      // Oops come from the handles saved across the safepoint, not from the
      // (stale) register locations.
      Handle handle = handles.at(k++);
      oop v = handle();
      if (!UseCompressedOops) {
        oop* p = (oop*)((address)new_vt + off);
        oopDesc::store_heap_oop(p, v);
      } else {
        narrowOop* p = (narrowOop*)((address)new_vt + off);
        oopDesc::encode_store_heap_oop(p, v);
      }
      break;
    }
    case T_FLOAT: {
      jfloat v = *(jfloat*)loc;
      *(jfloat*)((address)new_vt + off) = v;
      break;
    }
    case T_DOUBLE: {
      jdouble v = *(jdouble*)loc;
      *(jdouble*)((address)new_vt + off) = v;
      break;
    }
    default:
      ShouldNotReachHere();
    }
    j++;
  }
  assert(j == nb_fields, "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}
 533 
// Inspects the return register (the slot the convention assigns to a single
// T_METADATA value): if it holds a metaspace pointer, that identifies the
// ValueKlass of a value being returned as fields; otherwise returns NULL.
ValueKlass* ValueKlass::returned_value_type(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  // Only metadata pointers are interpreted as a ValueKlass; heap oops or
  // buffered-value pointers fall through to NULL.
  if (Metaspace::contains((void*)ptr)) {
    return (ValueKlass*)ptr;
  }
  return NULL;
//  if (Universe::heap()->is_in_reserved((void*)ptr)) {
//    return NULL;
//  }
//  return (ValueKlass*)ptr;
}
 551 
--- EOF ---